diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index bd47abc710..a08e7aacae 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -99,7 +99,7 @@ jobs: - name: Set up dotenvs run: | cp docker/.env.example docker/.env - cp docker/middleware.env.example docker/middleware.env + cp docker/envs/middleware.env.example docker/middleware.env - name: Expose Service Ports run: sh .github/workflows/expose_service_ports.sh diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index 76fbd18f47..9c2c6e2ca9 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -116,6 +116,12 @@ jobs: if: github.event_name != 'merge_group' uses: ./.github/actions/setup-web + - name: Generate API docs + if: github.event_name != 'merge_group' && steps.api-changes.outputs.any_changed == 'true' + run: | + cd api + uv run dev/generate_swagger_markdown_docs.py --swagger-dir openapi --markdown-dir openapi/markdown + - name: ESLint autofix if: github.event_name != 'merge_group' && steps.web-changes.outputs.any_changed == 'true' run: | diff --git a/.github/workflows/db-migration-test.yml b/.github/workflows/db-migration-test.yml index 65f0149a74..9d3ccb34b2 100644 --- a/.github/workflows/db-migration-test.yml +++ b/.github/workflows/db-migration-test.yml @@ -37,7 +37,7 @@ jobs: - name: Prepare middleware env run: | cd docker - cp middleware.env.example middleware.env + cp envs/middleware.env.example middleware.env - name: Set up Middlewares uses: hoverkraft-tech/compose-action@d2bee4f07e8ca410d6b196d00f90c12e7d48c33a # v2.6.0 @@ -87,7 +87,7 @@ jobs: - name: Prepare middleware env for MySQL run: | cd docker - cp middleware.env.example middleware.env + cp envs/middleware.env.example middleware.env sed -i 's/DB_TYPE=postgresql/DB_TYPE=mysql/' middleware.env sed -i 's/DB_HOST=db_postgres/DB_HOST=db_mysql/' middleware.env sed -i 's/DB_PORT=5432/DB_PORT=3306/' middleware.env diff --git 
a/.github/workflows/main-ci.yml b/.github/workflows/main-ci.yml index 8071d6204d..f624e8f872 100644 --- a/.github/workflows/main-ci.yml +++ b/.github/workflows/main-ci.yml @@ -57,7 +57,7 @@ jobs: - '.github/workflows/api-tests.yml' - '.github/workflows/expose_service_ports.sh' - 'docker/.env.example' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - 'docker/docker-compose.middleware.yaml' - 'docker/docker-compose-template.yaml' - 'docker/generate_docker_compose' @@ -84,7 +84,7 @@ jobs: - 'pnpm-workspace.yaml' - '.nvmrc' - 'docker/docker-compose.middleware.yaml' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - '.github/workflows/web-e2e.yml' - '.github/actions/setup-web/**' vdb: @@ -94,7 +94,7 @@ jobs: - '.github/workflows/vdb-tests.yml' - '.github/workflows/expose_service_ports.sh' - 'docker/.env.example' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - 'docker/docker-compose.yaml' - 'docker/docker-compose-template.yaml' - 'docker/generate_docker_compose' @@ -116,7 +116,7 @@ jobs: - '.github/workflows/db-migration-test.yml' - '.github/workflows/expose_service_ports.sh' - 'docker/.env.example' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - 'docker/docker-compose.middleware.yaml' - 'docker/docker-compose-template.yaml' - 'docker/generate_docker_compose' diff --git a/.github/workflows/pyrefly-diff-comment.yml b/.github/workflows/pyrefly-diff-comment.yml index 7f82942e7e..8e16baf933 100644 --- a/.github/workflows/pyrefly-diff-comment.yml +++ b/.github/workflows/pyrefly-diff-comment.yml @@ -77,10 +77,28 @@ jobs: } if (diff.trim()) { - await github.rest.issues.createComment({ + const body = '### Pyrefly Diff\n
\nbase → PR\n\n```diff\n' + diff + '\n```\n
'; + const marker = '### Pyrefly Diff'; + const { data: comments } = await github.rest.issues.listComments({ issue_number: prNumber, owner: context.repo.owner, repo: context.repo.repo, - body: '### Pyrefly Diff\n
\nbase → PR\n\n```diff\n' + diff + '\n```\n
', }); + const existing = comments.find((comment) => comment.body.startsWith(marker)); + + if (existing) { + await github.rest.issues.updateComment({ + comment_id: existing.id, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } else { + await github.rest.issues.createComment({ + issue_number: prNumber, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } } diff --git a/.github/workflows/pyrefly-diff.yml b/.github/workflows/pyrefly-diff.yml index 0cf54e3585..386bd25751 100644 --- a/.github/workflows/pyrefly-diff.yml +++ b/.github/workflows/pyrefly-diff.yml @@ -103,9 +103,26 @@ jobs: ].join('\n') : '### Pyrefly Diff\nNo changes detected.'; - await github.rest.issues.createComment({ + const marker = '### Pyrefly Diff'; + const { data: comments } = await github.rest.issues.listComments({ issue_number: prNumber, owner: context.repo.owner, repo: context.repo.repo, - body, }); + const existing = comments.find((comment) => comment.body.startsWith(marker)); + + if (existing) { + await github.rest.issues.updateComment({ + comment_id: existing.id, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } else { + await github.rest.issues.createComment({ + issue_number: prNumber, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } diff --git a/.github/workflows/vdb-tests-full.yml b/.github/workflows/vdb-tests-full.yml index 5c241af5c5..1405eb4eeb 100644 --- a/.github/workflows/vdb-tests-full.yml +++ b/.github/workflows/vdb-tests-full.yml @@ -51,7 +51,7 @@ jobs: - name: Set up dotenvs run: | cp docker/.env.example docker/.env - cp docker/middleware.env.example docker/middleware.env + cp docker/envs/middleware.env.example docker/middleware.env - name: Expose Service Ports run: sh .github/workflows/expose_service_ports.sh diff --git a/.github/workflows/vdb-tests.yml b/.github/workflows/vdb-tests.yml index 38ec96f00f..cdcdcb27d7 100644 --- a/.github/workflows/vdb-tests.yml +++ 
b/.github/workflows/vdb-tests.yml @@ -48,7 +48,7 @@ jobs: - name: Set up dotenvs run: | cp docker/.env.example docker/.env - cp docker/middleware.env.example docker/middleware.env + cp docker/envs/middleware.env.example docker/middleware.env - name: Expose Service Ports run: sh .github/workflows/expose_service_ports.sh diff --git a/Makefile b/Makefile index d8c9df5208..ae7589bbd6 100644 --- a/Makefile +++ b/Makefile @@ -71,13 +71,13 @@ type-check: @echo "📝 Running type checks (basedpyright + pyrefly + mypy)..." @./dev/basedpyright-check $(PATH_TO_CHECK) @./dev/pyrefly-check-local - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --check-untyped-defs --disable-error-code=import-untyped . @echo "✅ Type checks complete" type-check-core: @echo "📝 Running core type checks (basedpyright + mypy)..." @./dev/basedpyright-check $(PATH_TO_CHECK) - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --exclude 'dev/generate_fastopenapi_specs.py' --check-untyped-defs --disable-error-code=import-untyped . @echo "✅ Core type checks complete" test: diff --git a/README.md b/README.md index e6f8d84931..b6cbb0e126 100644 --- a/README.md +++ b/README.md @@ -76,11 +76,10 @@ The easiest way to start the Dify server is through [Docker Compose](docker/dock ```bash cd dify cd docker -./dify-compose up -d +cp .env.example .env +docker compose up -d ``` -On Windows PowerShell, run `.\dify-compose.ps1 up -d` from the `docker` directory. 
- After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization process. #### Seeking help @@ -138,7 +137,7 @@ Star Dify on GitHub and be instantly notified of new releases. ### Custom configurations -If you need to customize the configuration, add only the values you want to override to `docker/.env`. The default values live in [`docker/.env.default`](docker/.env.default), and the full reference remains in [`docker/.env.example`](docker/.env.example). After making any changes, re-run `./dify-compose up -d` or `.\dify-compose.ps1 up -d` from the `docker` directory. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments). +If you need to customize the configuration, edit `docker/.env`. The essential startup defaults live in [`docker/.env.example`](docker/.env.example), and optional advanced variables are split under `docker/envs/` by theme. After making any changes, re-run `docker compose up -d` from the `docker` directory. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments). ### Metrics Monitoring with Grafana diff --git a/api/.env.example b/api/.env.example index f6f65011ea..56ba8a6c5d 100644 --- a/api/.env.example +++ b/api/.env.example @@ -98,6 +98,8 @@ DB_DATABASE=dify SQLALCHEMY_POOL_PRE_PING=true SQLALCHEMY_POOL_TIMEOUT=30 +# Connection pool reset behavior on return +SQLALCHEMY_POOL_RESET_ON_RETURN=rollback # Storage configuration # use for store upload files, private keys... 
@@ -381,7 +383,7 @@ VIKINGDB_ACCESS_KEY=your-ak VIKINGDB_SECRET_KEY=your-sk VIKINGDB_REGION=cn-shanghai VIKINGDB_HOST=api-vikingdb.xxx.volces.com -VIKINGDB_SCHEMA=http +VIKINGDB_SCHEME=http VIKINGDB_CONNECTION_TIMEOUT=30 VIKINGDB_SOCKET_TIMEOUT=30 @@ -432,8 +434,6 @@ UPLOAD_FILE_EXTENSION_BLACKLIST= # Model configuration MULTIMODAL_SEND_FORMAT=base64 -PROMPT_GENERATION_MAX_TOKENS=512 -CODE_GENERATION_MAX_TOKENS=1024 PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false # Mail configuration, support: resend, smtp, sendgrid diff --git a/api/configs/middleware/__init__.py b/api/configs/middleware/__init__.py index c392b8840f..ee8b93aa9f 100644 --- a/api/configs/middleware/__init__.py +++ b/api/configs/middleware/__init__.py @@ -114,7 +114,7 @@ class SQLAlchemyEngineOptionsDict(TypedDict): pool_pre_ping: bool connect_args: dict[str, str] pool_use_lifo: bool - pool_reset_on_return: None + pool_reset_on_return: Literal["commit", "rollback", None] pool_timeout: int @@ -223,6 +223,11 @@ class DatabaseConfig(BaseSettings): default=30, ) + SQLALCHEMY_POOL_RESET_ON_RETURN: Literal["commit", "rollback", None] = Field( + description="Connection pool reset behavior on return. 
Options: 'commit', 'rollback', or None", + default="rollback", + ) + RETRIEVAL_SERVICE_EXECUTORS: NonNegativeInt = Field( description="Number of processes for the retrieval service, default to CPU cores.", default=os.cpu_count() or 1, @@ -252,7 +257,7 @@ class DatabaseConfig(BaseSettings): "pool_pre_ping": self.SQLALCHEMY_POOL_PRE_PING, "connect_args": connect_args, "pool_use_lifo": self.SQLALCHEMY_POOL_USE_LIFO, - "pool_reset_on_return": None, + "pool_reset_on_return": self.SQLALCHEMY_POOL_RESET_ON_RETURN, "pool_timeout": self.SQLALCHEMY_POOL_TIMEOUT, } return result diff --git a/api/constants/recommended_apps.json b/api/constants/recommended_apps.json index 3779fb0180..3d728f1b2e 100644 --- a/api/constants/recommended_apps.json +++ b/api/constants/recommended_apps.json @@ -19,7 +19,7 @@ "name": "Website Generator" }, "app_id": "b53545b1-79ea-4da3-b31a-c39391c6f041", - "category": "Programming", + "categories": ["Programming"], "copyright": null, "description": null, "is_listed": true, @@ -35,7 +35,7 @@ "name": "Investment Analysis Report Copilot" }, "app_id": "a23b57fa-85da-49c0-a571-3aff375976c1", - "category": "Agent", + "categories": ["Agent"], "copyright": "Dify.AI", "description": "Welcome to your personalized Investment Analysis Copilot service, where we delve into the depths of stock analysis to provide you with comprehensive insights. \n", "is_listed": true, @@ -51,7 +51,7 @@ "name": "Workflow Planning Assistant " }, "app_id": "f3303a7d-a81c-404e-b401-1f8711c998c1", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "An assistant that helps you plan and select the right node for a workflow (V0.6.0). ", "is_listed": true, @@ -67,7 +67,7 @@ "name": "Automated Email Reply " }, "app_id": "e9d92058-7d20-4904-892f-75d90bef7587", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Reply emails using Gmail API. 
It will automatically retrieve email in your inbox and create a response in Gmail. \nConfigure your Gmail API in Google Cloud Console. ", "is_listed": true, @@ -83,7 +83,7 @@ "name": "Book Translation " }, "app_id": "98b87f88-bd22-4d86-8b74-86beba5e0ed4", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "A workflow designed to translate a full book up to 15000 tokens per run. Uses Code node to separate text into chunks and Iteration to translate each chunk. ", "is_listed": true, @@ -99,7 +99,7 @@ "name": "Python bug fixer" }, "app_id": "cae337e6-aec5-4c7b-beca-d6f1a808bd5e", - "category": "Programming", + "categories": ["Programming"], "copyright": null, "description": null, "is_listed": true, @@ -115,7 +115,7 @@ "name": "Code Interpreter" }, "app_id": "d077d587-b072-4f2c-b631-69ed1e7cdc0f", - "category": "Programming", + "categories": ["Programming"], "copyright": "Copyright 2023 Dify", "description": "Code interpreter, clarifying the syntax and semantics of the code.", "is_listed": true, @@ -131,7 +131,7 @@ "name": "SVG Logo Design " }, "app_id": "73fbb5f1-c15d-4d74-9cc8-46d9db9b2cca", - "category": "Agent", + "categories": ["Agent"], "copyright": "Dify.AI", "description": "Hello, I am your creative partner in bringing ideas to vivid life! I can assist you in creating stunning designs by leveraging abilities of DALL·E 3. ", "is_listed": true, @@ -147,7 +147,7 @@ "name": "Long Story Generator (Iteration) " }, "app_id": "5efb98d7-176b-419c-b6ef-50767391ab62", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "A workflow demonstrating how to use Iteration node to generate long article that is longer than the context length of LLMs. 
", "is_listed": true, @@ -163,7 +163,7 @@ "name": "Text Summarization Workflow" }, "app_id": "f00c4531-6551-45ee-808f-1d7903099515", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Based on users' choice, retrieve external knowledge to more accurately summarize articles.", "is_listed": true, @@ -179,7 +179,7 @@ "name": "YouTube Channel Data Analysis" }, "app_id": "be591209-2ca8-410f-8f3b-ca0e530dd638", - "category": "Agent", + "categories": ["Agent"], "copyright": "Dify.AI", "description": "I am a YouTube Channel Data Analysis Copilot, I am here to provide expert data analysis tailored to your needs. ", "is_listed": true, @@ -195,7 +195,7 @@ "name": "Article Grading Bot" }, "app_id": "a747f7b4-c48b-40d6-b313-5e628232c05f", - "category": "Writing", + "categories": ["Writing"], "copyright": null, "description": "Assess the quality of articles and text based on user defined criteria. ", "is_listed": true, @@ -211,7 +211,7 @@ "name": "SEO Blog Generator" }, "app_id": "18f3bd03-524d-4d7a-8374-b30dbe7c69d5", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Workflow for retrieving information from the internet, followed by segmented generation of SEO blogs.", "is_listed": true, @@ -227,7 +227,7 @@ "name": "SQL Creator" }, "app_id": "050ef42e-3e0c-40c1-a6b6-a64f2c49d744", - "category": "Programming", + "categories": ["Programming"], "copyright": "Copyright 2023 Dify", "description": "Write SQL from natural language by pasting in your schema with the request.Please describe your query requirements in natural language and select the target database type.", "is_listed": true, @@ -243,7 +243,7 @@ "name": "Sentiment Analysis " }, "app_id": "f06bf86b-d50c-4895-a942-35112dbe4189", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Batch sentiment analysis of text, followed by JSON output of sentiment classification along with scores.", "is_listed": true, @@ 
-259,7 +259,7 @@ "name": "Strategic Consulting Expert" }, "app_id": "7e8ca1ae-02f2-4b5f-979e-62d19133bee2", - "category": "Assistant", + "categories": ["Assistant"], "copyright": "Copyright 2023 Dify", "description": "I can answer your questions related to strategic marketing.", "is_listed": true, @@ -275,7 +275,7 @@ "name": "Code Converter" }, "app_id": "4006c4b2-0735-4f37-8dbb-fb1a8c5bd87a", - "category": "Programming", + "categories": ["Programming"], "copyright": "Copyright 2023 Dify", "description": "This is an application that provides the ability to convert code snippets in multiple programming languages. You can input the code you wish to convert, select the target programming language, and get the desired output.", "is_listed": true, @@ -291,7 +291,7 @@ "name": "Question Classifier + Knowledge + Chatbot " }, "app_id": "d9f6b733-e35d-4a40-9f38-ca7bbfa009f7", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Basic Workflow Template, a chatbot capable of identifying intents alongside with a knowledge base.", "is_listed": true, @@ -307,7 +307,7 @@ "name": "AI Front-end interviewer" }, "app_id": "127efead-8944-4e20-ba9d-12402eb345e0", - "category": "HR", + "categories": ["HR"], "copyright": "Copyright 2023 Dify", "description": "A simulated front-end interviewer that tests the skill level of front-end development through questioning.", "is_listed": true, @@ -323,7 +323,7 @@ "name": "Knowledge Retrieval + Chatbot " }, "app_id": "e9870913-dd01-4710-9f06-15d4180ca1ce", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Basic Workflow Template, A chatbot with a knowledge base. 
", "is_listed": true, @@ -339,7 +339,7 @@ "name": "Email Assistant Workflow " }, "app_id": "dd5b6353-ae9b-4bce-be6a-a681a12cf709", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "A multifunctional email assistant capable of summarizing, replying, composing, proofreading, and checking grammar.", "is_listed": true, @@ -355,7 +355,7 @@ "name": "Customer Review Analysis Workflow " }, "app_id": "9c0cd31f-4b62-4005-adf5-e3888d08654a", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Utilize LLM (Large Language Models) to classify customer reviews and forward them to the internal system.", "is_listed": true, diff --git a/api/controllers/common/schema.py b/api/controllers/common/schema.py index 8d112c203b..57070f1c80 100644 --- a/api/controllers/common/schema.py +++ b/api/controllers/common/schema.py @@ -1,6 +1,14 @@ -"""Helpers for registering Pydantic models with Flask-RESTX namespaces.""" +"""Helpers for registering Pydantic models with Flask-RESTX namespaces. +Flask-RESTX treats `SchemaModel` bodies as opaque JSON schemas; it does not +promote Pydantic's nested `$defs` into top-level Swagger `definitions`. +These helpers keep that translation centralized so models registered through +`register_schema_models` emit resolvable Swagger 2.0 references. 
+""" + +from collections.abc import Mapping from enum import StrEnum +from typing import Any, NotRequired, TypedDict from flask_restx import Namespace from pydantic import BaseModel, TypeAdapter @@ -8,10 +16,52 @@ from pydantic import BaseModel, TypeAdapter DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" -def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: - """Register a single BaseModel with a namespace for Swagger documentation.""" +QueryParamDoc = TypedDict( + "QueryParamDoc", + { + "in": NotRequired[str], + "type": NotRequired[str], + "items": NotRequired[dict[str, object]], + "required": NotRequired[bool], + "description": NotRequired[str], + "enum": NotRequired[list[object]], + "default": NotRequired[object], + "minimum": NotRequired[int | float], + "maximum": NotRequired[int | float], + "minLength": NotRequired[int], + "maxLength": NotRequired[int], + "minItems": NotRequired[int], + "maxItems": NotRequired[int], + }, +) - namespace.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) + +def _register_json_schema(namespace: Namespace, name: str, schema: dict) -> None: + """Register a JSON schema and promote any nested Pydantic `$defs`.""" + + nested_definitions = schema.get("$defs") + schema_to_register = dict(schema) + if isinstance(nested_definitions, dict): + schema_to_register.pop("$defs") + + namespace.schema_model(name, schema_to_register) + + if not isinstance(nested_definitions, dict): + return + + for nested_name, nested_schema in nested_definitions.items(): + if isinstance(nested_schema, dict): + _register_json_schema(namespace, nested_name, nested_schema) + + +def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: + """Register a BaseModel and its nested schema definitions for Swagger documentation.""" + + _register_json_schema( + namespace, + model.__name__, + model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), + ) 
def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None: @@ -34,14 +84,111 @@ def get_or_create_model(model_name: str, field_def): def register_enum_models(namespace: Namespace, *models: type[StrEnum]) -> None: """Register multiple StrEnum with a namespace.""" for model in models: - namespace.schema_model( - model.__name__, TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + _register_json_schema( + namespace, + model.__name__, + TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), ) +def query_params_from_model(model: type[BaseModel]) -> dict[str, QueryParamDoc]: + """Build Flask-RESTX query parameter docs from a flat Pydantic model. + + `Namespace.expect()` treats Pydantic schema models as request bodies, so GET + endpoints should keep runtime validation on the Pydantic model and feed this + derived mapping to `Namespace.doc(params=...)` for Swagger documentation. + """ + + schema = model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + properties = schema.get("properties", {}) + if not isinstance(properties, Mapping): + return {} + + required = schema.get("required", []) + required_names = set(required) if isinstance(required, list) else set() + + params: dict[str, QueryParamDoc] = {} + for name, property_schema in properties.items(): + if not isinstance(name, str) or not isinstance(property_schema, Mapping): + continue + + params[name] = _query_param_from_property(property_schema, required=name in required_names) + + return params + + +def _query_param_from_property(property_schema: Mapping[str, Any], *, required: bool) -> QueryParamDoc: + param_schema = _nullable_property_schema(property_schema) + param_doc: QueryParamDoc = {"in": "query", "required": required} + + description = param_schema.get("description") + if isinstance(description, str): + param_doc["description"] = description + + schema_type = param_schema.get("type") + if isinstance(schema_type, str) 
and schema_type in {"array", "boolean", "integer", "number", "string"}: + param_doc["type"] = schema_type + if schema_type == "array": + items = param_schema.get("items") + if isinstance(items, Mapping): + item_type = items.get("type") + if isinstance(item_type, str): + param_doc["items"] = {"type": item_type} + + enum = param_schema.get("enum") + if isinstance(enum, list): + param_doc["enum"] = enum + + default = param_schema.get("default") + if default is not None: + param_doc["default"] = default + + minimum = param_schema.get("minimum") + if isinstance(minimum, int | float): + param_doc["minimum"] = minimum + + maximum = param_schema.get("maximum") + if isinstance(maximum, int | float): + param_doc["maximum"] = maximum + + min_length = param_schema.get("minLength") + if isinstance(min_length, int): + param_doc["minLength"] = min_length + + max_length = param_schema.get("maxLength") + if isinstance(max_length, int): + param_doc["maxLength"] = max_length + + min_items = param_schema.get("minItems") + if isinstance(min_items, int): + param_doc["minItems"] = min_items + + max_items = param_schema.get("maxItems") + if isinstance(max_items, int): + param_doc["maxItems"] = max_items + + return param_doc + + +def _nullable_property_schema(property_schema: Mapping[str, Any]) -> Mapping[str, Any]: + any_of = property_schema.get("anyOf") + if not isinstance(any_of, list): + return property_schema + + non_null_candidates = [ + candidate for candidate in any_of if isinstance(candidate, Mapping) and candidate.get("type") != "null" + ] + + if len(non_null_candidates) == 1: + return {**property_schema, **non_null_candidates[0]} + + return property_schema + + __all__ = [ "DEFAULT_REF_TEMPLATE_SWAGGER_2_0", "get_or_create_model", + "query_params_from_model", "register_enum_models", "register_schema_model", "register_schema_models", diff --git a/api/controllers/console/admin.py b/api/controllers/console/admin.py index dce394be97..a32c3420bb 100644 --- 
a/api/controllers/console/admin.py +++ b/api/controllers/console/admin.py @@ -12,6 +12,7 @@ from werkzeug.exceptions import BadRequest, NotFound, Unauthorized from configs import dify_config from constants.languages import supported_language +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.wraps import only_edition_cloud from core.db.session_factory import session_factory @@ -301,15 +302,7 @@ class BatchAddNotificationAccountsPayload(BaseModel): user_email: list[str] = Field(..., description="List of account email addresses") -console_ns.schema_model( - UpsertNotificationPayload.__name__, - UpsertNotificationPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) - -console_ns.schema_model( - BatchAddNotificationAccountsPayload.__name__, - BatchAddNotificationAccountsPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_schema_models(console_ns, UpsertNotificationPayload, BatchAddNotificationAccountsPayload) @console_ns.route("/admin/upsert_notification") diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index c8334bfd18..58ed6efc14 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -25,6 +25,7 @@ from controllers.console.wraps import ( is_admin_or_owner_required, setup_required, ) +from core.db.session_factory import session_factory from core.ops.ops_trace_manager import OpsTraceManager from core.rag.entities import PreProcessingRule, Rule, Segmentation from core.rag.retrieval.retrieval_methods import RetrievalMethod @@ -841,7 +842,8 @@ class AppTraceApi(Resource): @account_initialization_required def get(self, app_id): """Get app trace""" - app_trace_config = OpsTraceManager.get_app_tracing_config(app_id=app_id) + with session_factory.create_session() as session: + app_trace_config = OpsTraceManager.get_app_tracing_config(app_id, session) return 
app_trace_config diff --git a/api/controllers/console/app/app_import.py b/api/controllers/console/app/app_import.py index e91dc9cfe5..b653016319 100644 --- a/api/controllers/console/app/app_import.py +++ b/api/controllers/console/app/app_import.py @@ -2,7 +2,7 @@ from flask_restx import Resource from pydantic import BaseModel, Field from sqlalchemy.orm import Session -from controllers.common.schema import register_schema_models +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console.app.wraps import get_app_model from controllers.console.wraps import ( account_initialization_required, @@ -33,6 +33,7 @@ class AppImportPayload(BaseModel): app_id: str | None = Field(None) +register_enum_models(console_ns, ImportStatus) register_schema_models(console_ns, AppImportPayload, Import, CheckDependenciesResult) diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py index c720a5e074..d4f501d34c 100644 --- a/api/controllers/console/app/generator.py +++ b/api/controllers/console/app/generator.py @@ -3,6 +3,7 @@ from collections.abc import Sequence from flask_restx import Resource from pydantic import BaseModel, Field +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console import console_ns from controllers.console.app.error import ( CompletionRequestError, @@ -19,13 +20,12 @@ from core.helper.code_executor.python3.python3_code_provider import Python3CodeP from core.llm_generator.entities import RuleCodeGeneratePayload, RuleGeneratePayload, RuleStructuredOutputPayload from core.llm_generator.llm_generator import LLMGenerator from extensions.ext_database import db +from graphon.model_runtime.entities.llm_entities import LLMMode from graphon.model_runtime.errors.invoke import InvokeError from libs.login import current_account_with_tenant, login_required from models import App from services.workflow_service import WorkflowService 
-DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class InstructionGeneratePayload(BaseModel): flow_id: str = Field(..., description="Workflow/Flow ID") @@ -41,16 +41,16 @@ class InstructionTemplatePayload(BaseModel): type: str = Field(..., description="Instruction template type") -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(RuleGeneratePayload) -reg(RuleCodeGeneratePayload) -reg(RuleStructuredOutputPayload) -reg(InstructionGeneratePayload) -reg(InstructionTemplatePayload) -reg(ModelConfig) +register_enum_models(console_ns, LLMMode) +register_schema_models( + console_ns, + RuleGeneratePayload, + RuleCodeGeneratePayload, + RuleStructuredOutputPayload, + InstructionGeneratePayload, + InstructionTemplatePayload, + ModelConfig, +) @console_ns.route("/rule-generate") diff --git a/api/controllers/console/explore/recommended_app.py b/api/controllers/console/explore/recommended_app.py index 55bd679b48..572f9773a1 100644 --- a/api/controllers/console/explore/recommended_app.py +++ b/api/controllers/console/explore/recommended_app.py @@ -5,7 +5,7 @@ from flask_restx import Resource from pydantic import BaseModel, Field, computed_field, field_validator from constants.languages import languages -from controllers.common.schema import register_schema_models +from controllers.common.schema import query_params_from_model, register_schema_models from controllers.console import console_ns from controllers.console.wraps import account_initialization_required from fields.base import ResponseModel @@ -15,7 +15,7 @@ from services.recommended_app_service import RecommendedAppService class RecommendedAppsQuery(BaseModel): - language: str | None = Field(default=None) + language: str | None = Field(default=None, description="Language code for recommended app localization") class RecommendedAppInfoResponse(ResponseModel): @@ -52,7 +52,7 @@ class 
RecommendedAppResponse(ResponseModel): copyright: str | None = None privacy_policy: str | None = None custom_disclaimer: str | None = None - category: str | None = None + categories: list[str] = Field(default_factory=list) position: int | None = None is_listed: bool | None = None can_trial: bool | None = None @@ -74,7 +74,7 @@ register_schema_models( @console_ns.route("/explore/apps") class RecommendedAppListApi(Resource): - @console_ns.expect(console_ns.models[RecommendedAppsQuery.__name__]) + @console_ns.doc(params=query_params_from_model(RecommendedAppsQuery)) @console_ns.response(200, "Success", console_ns.models[RecommendedAppListResponse.__name__]) @login_required @account_initialization_required diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index 34c9534de8..e653c9064c 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -876,10 +876,10 @@ class ToolBuiltinProviderSetDefaultApi(Resource): @login_required @account_initialization_required def post(self, provider): - current_user, current_tenant_id = current_account_with_tenant() + _, current_tenant_id = current_account_with_tenant() payload = BuiltinProviderDefaultCredentialPayload.model_validate(console_ns.payload or {}) return BuiltinToolManageService.set_default_provider( - tenant_id=current_tenant_id, user_id=current_user.id, provider=provider, id=payload.id + tenant_id=current_tenant_id, provider=provider, id=payload.id ) diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index 7bab3f7bff..4a741d3154 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -842,24 +842,24 @@ class WorkflowResponseConverter: return [] files: list[Mapping[str, Any]] = [] - if isinstance(value, FileSegment): - 
files.append(value.value.to_dict()) - elif isinstance(value, ArrayFileSegment): - files.extend([i.to_dict() for i in value.value]) - elif isinstance(value, File): - files.append(value.to_dict()) - elif isinstance(value, list): - for item in value: - file = cls._get_file_var_from_value(item) + match value: + case FileSegment(): + files.append(value.value.to_dict()) + case ArrayFileSegment(): + files.extend([i.to_dict() for i in value.value]) + case File(): + files.append(value.to_dict()) + case list(): + for item in value: + file = cls._get_file_var_from_value(item) + if file: + files.append(file) + case dict(): + file = cls._get_file_var_from_value(value) if file: files.append(file) - elif isinstance( - value, - dict, - ): - file = cls._get_file_var_from_value(value) - if file: - files.append(file) + case _: + pass return files diff --git a/api/core/app/llm/__init__.py b/api/core/app/llm/__init__.py index f069bede74..d20a5b2344 100644 --- a/api/core/app/llm/__init__.py +++ b/api/core/app/llm/__init__.py @@ -1,5 +1,15 @@ """LLM-related application services.""" -from .quota import deduct_llm_quota, ensure_llm_quota_available +from .quota import ( + deduct_llm_quota, + deduct_llm_quota_for_model, + ensure_llm_quota_available, + ensure_llm_quota_available_for_model, +) -__all__ = ["deduct_llm_quota", "ensure_llm_quota_available"] +__all__ = [ + "deduct_llm_quota", + "deduct_llm_quota_for_model", + "ensure_llm_quota_available", + "ensure_llm_quota_available_for_model", +] diff --git a/api/core/app/llm/quota.py b/api/core/app/llm/quota.py index b6039e1e4e..5bf3334a7b 100644 --- a/api/core/app/llm/quota.py +++ b/api/core/app/llm/quota.py @@ -1,4 +1,14 @@ -from sqlalchemy import update +"""Tenant-scoped helpers for checking and deducting LLM provider quota. + +System-hosted quota accounting is currently defined only for LLM models. 
Keep +the public helpers LLM-specific so callers do not carry unused model-type +plumbing, and fail loudly if the deprecated ``ModelInstance`` wrappers are used +with a non-LLM model. +""" + +import warnings + +from sqlalchemy import select from sqlalchemy.orm import sessionmaker from configs import dify_config @@ -6,44 +16,47 @@ from core.entities.model_entities import ModelStatus from core.entities.provider_entities import ProviderQuotaType, QuotaUnit from core.errors.error import QuotaExceededError from core.model_manager import ModelInstance +from core.plugin.impl.model_runtime_factory import create_plugin_provider_manager from extensions.ext_database import db from graphon.model_runtime.entities.llm_entities import LLMUsage +from graphon.model_runtime.entities.model_entities import ModelType from libs.datetime_utils import naive_utc_now from models.provider import Provider, ProviderType from models.provider_ids import ModelProviderID -def ensure_llm_quota_available(*, model_instance: ModelInstance) -> None: - provider_model_bundle = model_instance.provider_model_bundle - provider_configuration = provider_model_bundle.configuration +def _get_provider_configuration(*, tenant_id: str, provider: str): + """Resolve the tenant-bound provider configuration for quota decisions.""" + provider_manager = create_plugin_provider_manager(tenant_id=tenant_id) + provider_configuration = provider_manager.get_configurations(tenant_id).get(provider) + if provider_configuration is None: + raise ValueError(f"Provider {provider} does not exist.") + return provider_configuration + +def ensure_llm_quota_available_for_model(*, tenant_id: str, provider: str, model: str) -> None: + """Raise when a tenant-bound LLM model is already out of quota.""" + provider_configuration = _get_provider_configuration(tenant_id=tenant_id, provider=provider) if provider_configuration.using_provider_type != ProviderType.SYSTEM: return provider_model = provider_configuration.get_provider_model( - 
model_type=model_instance.model_type_instance.model_type, - model=model_instance.model_name, + model_type=ModelType.LLM, + model=model, ) if provider_model and provider_model.status == ModelStatus.QUOTA_EXCEEDED: - raise QuotaExceededError(f"Model provider {model_instance.provider} quota exceeded.") + raise QuotaExceededError(f"Model provider {provider} quota exceeded.") -def deduct_llm_quota(*, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None: - provider_model_bundle = model_instance.provider_model_bundle - provider_configuration = provider_model_bundle.configuration - - if provider_configuration.using_provider_type != ProviderType.SYSTEM: - return - - system_configuration = provider_configuration.system_configuration - +def _resolve_llm_used_quota(*, system_configuration, model: str, usage: LLMUsage) -> int | None: + """Compute the quota impact for an LLM invocation under the current quota mode.""" quota_unit = None for quota_configuration in system_configuration.quota_configurations: if quota_configuration.quota_type == system_configuration.current_quota_type: quota_unit = quota_configuration.quota_unit if quota_configuration.quota_limit == -1: - return + return None break @@ -52,42 +65,136 @@ def deduct_llm_quota(*, tenant_id: str, model_instance: ModelInstance, usage: LL if quota_unit == QuotaUnit.TOKENS: used_quota = usage.total_tokens elif quota_unit == QuotaUnit.CREDITS: - used_quota = dify_config.get_model_credits(model_instance.model_name) + used_quota = dify_config.get_model_credits(model) else: used_quota = 1 + return used_quota + + +def _deduct_free_llm_quota( + *, + tenant_id: str, + provider: str, + quota_type: ProviderQuotaType, + used_quota: int, +) -> None: + """Deduct FREE provider quota, capping at the limit before reporting exhaustion.""" + quota_exceeded = False + with sessionmaker(bind=db.engine).begin() as session: + provider_record = session.scalar( + select(Provider) + .where( + Provider.tenant_id == tenant_id, + # 
TODO: Use provider name with prefix after the data migration. + Provider.provider_name == ModelProviderID(provider).provider_name, + Provider.provider_type == ProviderType.SYSTEM.value, + Provider.quota_type == quota_type, + ) + .with_for_update() + ) + if ( + provider_record is None + or provider_record.quota_limit is None + or provider_record.quota_used is None + or provider_record.quota_limit <= provider_record.quota_used + ): + quota_exceeded = True + else: + available_quota = provider_record.quota_limit - provider_record.quota_used + deducted_quota = min(used_quota, available_quota) + provider_record.quota_used += deducted_quota + provider_record.last_used = naive_utc_now() + quota_exceeded = deducted_quota < used_quota + + if quota_exceeded: + raise QuotaExceededError(f"Model provider {provider} quota exceeded.") + + +def _deduct_used_llm_quota(*, tenant_id: str, provider: str, provider_configuration, used_quota: int | None) -> None: + """Apply a resolved LLM quota charge against the current provider quota bucket.""" + if provider_configuration.using_provider_type != ProviderType.SYSTEM: + return + + system_configuration = provider_configuration.system_configuration if used_quota is not None and system_configuration.current_quota_type is not None: match system_configuration.current_quota_type: case ProviderQuotaType.TRIAL: from services.credit_pool_service import CreditPoolService - CreditPoolService.check_and_deduct_credits( + CreditPoolService.deduct_credits_capped( tenant_id=tenant_id, credits_required=used_quota, ) case ProviderQuotaType.PAID: from services.credit_pool_service import CreditPoolService - CreditPoolService.check_and_deduct_credits( + CreditPoolService.deduct_credits_capped( tenant_id=tenant_id, credits_required=used_quota, pool_type="paid", ) case ProviderQuotaType.FREE: - with sessionmaker(bind=db.engine).begin() as session: - stmt = ( - update(Provider) - .where( - Provider.tenant_id == tenant_id, - # TODO: Use provider name with prefix 
after the data migration. - Provider.provider_name == ModelProviderID(model_instance.provider).provider_name, - Provider.provider_type == ProviderType.SYSTEM.value, - Provider.quota_type == system_configuration.current_quota_type, - Provider.quota_limit > Provider.quota_used, - ) - .values( - quota_used=Provider.quota_used + used_quota, - last_used=naive_utc_now(), - ) - ) - session.execute(stmt) + _deduct_free_llm_quota( + tenant_id=tenant_id, + provider=provider, + quota_type=system_configuration.current_quota_type, + used_quota=used_quota, + ) + case _: + return + + +def deduct_llm_quota_for_model(*, tenant_id: str, provider: str, model: str, usage: LLMUsage) -> None: + """Deduct tenant-bound quota for the resolved LLM model identity.""" + provider_configuration = _get_provider_configuration(tenant_id=tenant_id, provider=provider) + used_quota = _resolve_llm_used_quota( + system_configuration=provider_configuration.system_configuration, + model=model, + usage=usage, + ) + _deduct_used_llm_quota( + tenant_id=tenant_id, + provider=provider, + provider_configuration=provider_configuration, + used_quota=used_quota, + ) + + +def _require_llm_model_instance(model_instance: ModelInstance) -> None: + """Reject deprecated wrapper calls that pass a non-LLM model instance.""" + if model_instance.model_type_instance.model_type != ModelType.LLM: + raise ValueError("LLM quota helpers only support LLM model instances.") + + +def ensure_llm_quota_available(*, model_instance: ModelInstance) -> None: + """Deprecated compatibility wrapper for callers that still pass ModelInstance.""" + warnings.warn( + "ensure_llm_quota_available(model_instance=...) is deprecated; " + "use ensure_llm_quota_available_for_model(...) 
instead.", + DeprecationWarning, + stacklevel=2, + ) + _require_llm_model_instance(model_instance) + ensure_llm_quota_available_for_model( + tenant_id=model_instance.provider_model_bundle.configuration.tenant_id, + provider=model_instance.provider, + model=model_instance.model_name, + ) + + +def deduct_llm_quota(*, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None: + """Deprecated compatibility wrapper for callers that still pass ModelInstance.""" + warnings.warn( + "deduct_llm_quota(tenant_id=..., model_instance=..., usage=...) is deprecated; " + "use deduct_llm_quota_for_model(...) instead.", + DeprecationWarning, + stacklevel=2, + ) + _require_llm_model_instance(model_instance) + deduct_llm_quota_for_model( + tenant_id=tenant_id, + provider=model_instance.provider, + model=model_instance.model_name, + usage=usage, + ) diff --git a/api/core/app/workflow/layers/llm_quota.py b/api/core/app/workflow/layers/llm_quota.py index 4a7918032e..2422eed5a7 100644 --- a/api/core/app/workflow/layers/llm_quota.py +++ b/api/core/app/workflow/layers/llm_quota.py @@ -1,36 +1,48 @@ """ LLM quota deduction layer for GraphEngine. -This layer centralizes model-quota deduction outside node implementations. +This layer centralizes model-quota handling outside node implementations. + +Graphon LLM-backed nodes expose provider/model identity through public node +configuration and, after execution, through ``node_run_result.inputs``. Resolve +quota billing from that public identity instead of depending on +``ModelInstance`` reconstruction inside the workflow layer. Missing identity on +quota-tracked nodes is treated as a workflow bug and aborts execution so quota +handling is never silently skipped. 
""" import logging -from typing import TYPE_CHECKING, cast, final, override +from typing import final, override -from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY, DifyRunContext -from core.app.llm import deduct_llm_quota, ensure_llm_quota_available +from core.app.llm import deduct_llm_quota_for_model, ensure_llm_quota_available_for_model from core.errors.error import QuotaExceededError -from core.model_manager import ModelInstance -from graphon.enums import BuiltinNodeTypes +from graphon.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus from graphon.graph_engine.entities.commands import AbortCommand, CommandType from graphon.graph_engine.layers import GraphEngineLayer from graphon.graph_events import GraphEngineEvent, GraphNodeEventBase, NodeRunSucceededEvent +from graphon.node_events import NodeRunResult from graphon.nodes.base.node import Node -if TYPE_CHECKING: - from graphon.nodes.llm.node import LLMNode - from graphon.nodes.parameter_extractor.parameter_extractor_node import ParameterExtractorNode - from graphon.nodes.question_classifier.question_classifier_node import QuestionClassifierNode - logger = logging.getLogger(__name__) +_QUOTA_NODE_TYPES = frozenset( + [ + BuiltinNodeTypes.LLM, + BuiltinNodeTypes.PARAMETER_EXTRACTOR, + BuiltinNodeTypes.QUESTION_CLASSIFIER, + ] +) @final class LLMQuotaLayer(GraphEngineLayer): - """Graph layer that applies LLM quota deduction after node execution.""" + """Graph layer that applies tenant-scoped quota checks to LLM-backed nodes.""" - def __init__(self) -> None: + tenant_id: str + _abort_sent: bool + + def __init__(self, tenant_id: str) -> None: super().__init__() + self.tenant_id = tenant_id self._abort_sent = False @override @@ -50,33 +62,49 @@ class LLMQuotaLayer(GraphEngineLayer): if self._abort_sent: return - model_instance = self._extract_model_instance(node) - if model_instance is None: + if not self._supports_quota(node): return + model_identity = 
self._extract_model_identity_from_node(node) + if model_identity is None: + reason = "LLM quota check requires public node model identity before execution." + self._abort_before_node_run(node=node, reason=reason, error_type="LLMQuotaIdentityError") + logger.error("LLM quota handling aborted, node_id=%s, reason=%s", node.id, reason) + return + + provider, model_name = model_identity try: - ensure_llm_quota_available(model_instance=model_instance) + ensure_llm_quota_available_for_model( + tenant_id=self.tenant_id, + provider=provider, + model=model_name, + ) except QuotaExceededError as exc: - self._set_stop_event(node) - self._send_abort_command(reason=str(exc)) + self._abort_before_node_run(node=node, reason=str(exc), error_type=QuotaExceededError.__name__) logger.warning("LLM quota check failed, node_id=%s, error=%s", node.id, exc) @override def on_node_run_end( self, node: Node, error: Exception | None, result_event: GraphNodeEventBase | None = None ) -> None: - if error is not None or not isinstance(result_event, NodeRunSucceededEvent): + if error is not None or not isinstance(result_event, NodeRunSucceededEvent) or not self._supports_quota(node): return - model_instance = self._extract_model_instance(node) - if model_instance is None: + model_identity = self._extract_model_identity_from_result_event(result_event) + if model_identity is None: + self._abort_for_missing_model_identity( + node=node, + reason="LLM quota deduction requires model identity in the node result event.", + ) return + provider, model_name = model_identity + try: - dify_ctx = DifyRunContext.model_validate(node.require_run_context_value(DIFY_RUN_CONTEXT_KEY)) - deduct_llm_quota( - tenant_id=dify_ctx.tenant_id, - model_instance=model_instance, + deduct_llm_quota_for_model( + tenant_id=self.tenant_id, + provider=provider, + model=model_name, usage=result_event.node_run_result.llm_usage, ) except QuotaExceededError as exc: @@ -92,6 +120,27 @@ class LLMQuotaLayer(GraphEngineLayer): if stop_event 
is not None: stop_event.set() + def _abort_before_node_run(self, *, node: Node, reason: str, error_type: str) -> None: + self._set_stop_event(node) + node.node_data.error_strategy = None + node.node_data.retry_config.retry_enabled = False + + def quota_aborted_run() -> NodeRunResult: + return NodeRunResult( + status=WorkflowNodeExecutionStatus.FAILED, + error=reason, + error_type=error_type, + ) + + # TODO: Push Graphon to expose a public pre-run failure/skip hook, then replace this private _run override. + node._run = quota_aborted_run # type: ignore[method-assign] + self._send_abort_command(reason=reason) + + def _abort_for_missing_model_identity(self, *, node: Node, reason: str) -> None: + self._set_stop_event(node) + self._send_abort_command(reason=reason) + logger.error("LLM quota handling aborted, node_id=%s, reason=%s", node.id, reason) + def _send_abort_command(self, *, reason: str) -> None: if not self.command_channel or self._abort_sent: return @@ -108,29 +157,38 @@ class LLMQuotaLayer(GraphEngineLayer): logger.exception("Failed to send quota abort command") @staticmethod - def _extract_model_instance(node: Node) -> ModelInstance | None: - try: - match node.node_type: - case BuiltinNodeTypes.LLM: - model_instance = cast("LLMNode", node).model_instance - case BuiltinNodeTypes.PARAMETER_EXTRACTOR: - model_instance = cast("ParameterExtractorNode", node).model_instance - case BuiltinNodeTypes.QUESTION_CLASSIFIER: - model_instance = cast("QuestionClassifierNode", node).model_instance - case _: - return None - except AttributeError: + def _supports_quota(node: Node) -> bool: + return node.node_type in _QUOTA_NODE_TYPES + + @staticmethod + def _extract_model_identity_from_result_event(result_event: NodeRunSucceededEvent) -> tuple[str, str] | None: + provider = result_event.node_run_result.inputs.get("model_provider") + model_name = result_event.node_run_result.inputs.get("model_name") + if isinstance(provider, str) and provider and isinstance(model_name, str) 
and model_name: + return provider, model_name + return None + + @staticmethod + def _extract_model_identity_from_node(node: Node) -> tuple[str, str] | None: + node_data = getattr(node, "node_data", None) + if node_data is None: + node_data = getattr(node, "data", None) + + model_config = getattr(node_data, "model", None) + if model_config is None: logger.warning( - "LLMQuotaLayer skipped quota deduction because node does not expose a model instance, node_id=%s", + "LLMQuotaLayer skipped quota handling because node model config is missing, node_id=%s", node.id, ) return None - if isinstance(model_instance, ModelInstance): - return model_instance - - raw_model_instance = getattr(model_instance, "_model_instance", None) - if isinstance(raw_model_instance, ModelInstance): - return raw_model_instance + provider = getattr(model_config, "provider", None) + model_name = getattr(model_config, "name", None) + if isinstance(provider, str) and provider and isinstance(model_name, str) and model_name: + return provider, model_name + logger.warning( + "LLMQuotaLayer skipped quota handling because node model identity is invalid, node_id=%s", + node.id, + ) return None diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index 38b87e2cd1..495fd1d898 100644 --- a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -23,7 +23,7 @@ from core.entities.provider_entities import ( ) from core.helper import encrypter from core.helper.model_provider_cache import ProviderCredentialsCache, ProviderCredentialsCacheType -from core.plugin.impl.model_runtime_factory import create_plugin_model_provider_factory +from core.plugin.impl.model_runtime_factory import create_model_type_instance, create_plugin_model_assembly from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.model_runtime.entities.provider_entities import ( ConfigurateMethod, @@ -33,7 +33,7 
@@ from graphon.model_runtime.entities.provider_entities import ( ) from graphon.model_runtime.model_providers.base.ai_model import AIModel from graphon.model_runtime.model_providers.model_provider_factory import ModelProviderFactory -from graphon.model_runtime.runtime import ModelRuntime +from graphon.model_runtime.protocols.runtime import ModelRuntime from libs.datetime_utils import naive_utc_now from models.engine import db from models.enums import CredentialSourceType @@ -106,11 +106,18 @@ class ProviderConfiguration(BaseModel): """Attach the already-composed runtime for request-bound call chains.""" self._bound_model_runtime = model_runtime + def _get_runtime_and_provider_factory(self) -> tuple[ModelRuntime, ModelProviderFactory]: + """Resolve a provider factory that stays aligned with the runtime used by the caller.""" + if self._bound_model_runtime is not None: + return self._bound_model_runtime, ModelProviderFactory(runtime=self._bound_model_runtime) + + model_assembly = create_plugin_model_assembly(tenant_id=self.tenant_id) + return model_assembly.model_runtime, model_assembly.model_provider_factory + def get_model_provider_factory(self) -> ModelProviderFactory: """Return a provider factory that preserves any request-bound runtime.""" - if self._bound_model_runtime is not None: - return ModelProviderFactory(model_runtime=self._bound_model_runtime) - return create_plugin_model_provider_factory(tenant_id=self.tenant_id) + _, model_provider_factory = self._get_runtime_and_provider_factory() + return model_provider_factory def get_current_credentials(self, model_type: ModelType, model: str) -> dict[str, Any] | None: """ @@ -1392,10 +1399,13 @@ class ProviderConfiguration(BaseModel): :param model_type: model type :return: """ - model_provider_factory = self.get_model_provider_factory() - - # Get model instance of LLM - return model_provider_factory.get_model_type_instance(provider=self.provider.provider, model_type=model_type) + model_runtime, 
model_provider_factory = self._get_runtime_and_provider_factory() + provider_schema = model_provider_factory.get_provider_schema(provider=self.provider.provider) + return create_model_type_instance( + runtime=model_runtime, + provider_schema=provider_schema, + model_type=model_type, + ) def get_model_schema( self, model_type: ModelType, model: str, credentials: dict[str, Any] | None diff --git a/api/core/helper/moderation.py b/api/core/helper/moderation.py index f169f247cf..18b9b72e9d 100644 --- a/api/core/helper/moderation.py +++ b/api/core/helper/moderation.py @@ -4,7 +4,7 @@ from typing import cast from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.entities import DEFAULT_PLUGIN_ID -from core.plugin.impl.model_runtime_factory import create_plugin_model_provider_factory +from core.plugin.impl.model_runtime_factory import create_plugin_model_assembly from extensions.ext_hosting_provider import hosting_configuration from graphon.model_runtime.entities.model_entities import ModelType from graphon.model_runtime.errors.invoke import InvokeBadRequestError @@ -41,10 +41,8 @@ def check_moderation(tenant_id: str, model_config: ModelConfigWithCredentialsEnt text_chunk = secrets.choice(text_chunks) try: - model_provider_factory = create_plugin_model_provider_factory(tenant_id=tenant_id) - - # Get model instance of LLM - model_type_instance = model_provider_factory.get_model_type_instance( + model_assembly = create_plugin_model_assembly(tenant_id=tenant_id) + model_type_instance = model_assembly.create_model_type_instance( provider=openai_provider_name, model_type=ModelType.MODERATION ) model_type_instance = cast(ModerationModel, model_type_instance) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index e7ba6e502b..bae0016744 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -569,13 +569,13 @@ class OpsTraceManager: db.session.commit() @classmethod - def 
get_app_tracing_config(cls, app_id: str): + def get_app_tracing_config(cls, app_id: str, session: Session): """ Get app tracing config :param app_id: app id :return: """ - app: App | None = db.session.get(App, app_id) + app: App | None = session.get(App, app_id) if not app: raise ValueError("App not found") if not app.tracing: diff --git a/api/core/plugin/impl/model_runtime.py b/api/core/plugin/impl/model_runtime.py index 4e66d58b5e..62573ba2f5 100644 --- a/api/core/plugin/impl/model_runtime.py +++ b/api/core/plugin/impl/model_runtime.py @@ -4,23 +4,32 @@ import hashlib import logging from collections.abc import Generator, Iterable, Sequence from threading import Lock -from typing import IO, Any, Union +from typing import IO, Any, Literal, cast, overload from pydantic import ValidationError from redis import RedisError from configs import dify_config +from core.llm_generator.output_parser.structured_output import ( + invoke_llm_with_structured_output as invoke_llm_with_structured_output_helper, +) from core.plugin.entities.plugin_daemon import PluginModelProviderEntity from core.plugin.impl.asset import PluginAssetManager from core.plugin.impl.model import PluginModelClient from extensions.ext_redis import redis_client -from graphon.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk +from graphon.model_runtime.entities.llm_entities import ( + LLMResult, + LLMResultChunk, + LLMResultChunkWithStructuredOutput, + LLMResultWithStructuredOutput, +) from graphon.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool from graphon.model_runtime.entities.model_entities import AIModelEntity, ModelType from graphon.model_runtime.entities.provider_entities import ProviderEntity from graphon.model_runtime.entities.rerank_entities import MultimodalRerankInput, RerankResult from graphon.model_runtime.entities.text_embedding_entities import EmbeddingInputType, EmbeddingResult -from graphon.model_runtime.runtime import ModelRuntime +from 
graphon.model_runtime.model_providers.base.large_language_model import normalize_non_stream_runtime_result +from graphon.model_runtime.protocols.runtime import ModelRuntime from models.provider_ids import ModelProviderID logger = logging.getLogger(__name__) @@ -29,6 +38,68 @@ logger = logging.getLogger(__name__) TENANT_SCOPE_SCHEMA_CACHE_USER_ID = "__DIFY_TS__" +# TODO(-LAN-): Move native structured-output invocation into Graphon's LLM node. +# TODO(-LAN-): Remove this Dify-side adapter once Graphon owns structured output end-to-end. +class _PluginStructuredOutputModelInstance: + """Bind plugin model identity to the shared structured-output helper. + + The structured-output parser is shared with legacy ``ModelInstance`` flows + and only needs an object exposing ``invoke_llm(...)``. ``PluginModelRuntime`` + intentionally exposes a lower-level API where provider, model, and + credentials are passed per call. This adapter supplies the small bound + ``invoke_llm`` surface the helper needs without constructing a full + ``ModelInstance`` or reintroducing model-manager dependencies into the + plugin runtime path. 
+ """ + + def __init__( + self, + *, + runtime: PluginModelRuntime, + provider: str, + model: str, + credentials: dict[str, Any], + ) -> None: + self._runtime = runtime + self._provider = provider + self._model = model + self._credentials = credentials + + def invoke_llm( + self, + *, + prompt_messages: Sequence[PromptMessage], + model_parameters: dict[str, Any] | None = None, + tools: Sequence[PromptMessageTool] | None = None, + stop: Sequence[str] | None = None, + stream: bool = True, + callbacks: object | None = None, + ) -> LLMResult | Generator[LLMResultChunk, None, None]: + del callbacks + if stream: + return self._runtime.invoke_llm( + provider=self._provider, + model=self._model, + credentials=self._credentials, + model_parameters=model_parameters or {}, + prompt_messages=prompt_messages, + tools=list(tools) if tools else None, + stop=stop, + stream=True, + ) + + return self._runtime.invoke_llm( + provider=self._provider, + model=self._model, + credentials=self._credentials, + model_parameters=model_parameters or {}, + prompt_messages=prompt_messages, + tools=list(tools) if tools else None, + stop=stop, + stream=False, + ) + + class PluginModelRuntime(ModelRuntime): """Plugin-backed runtime adapter bound to tenant context and optional caller scope.""" @@ -195,6 +266,34 @@ class PluginModelRuntime(ModelRuntime): return schema + @overload + def invoke_llm( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + tools: list[PromptMessageTool] | None, + stop: Sequence[str] | None, + stream: Literal[False], + ) -> LLMResult: ... + + @overload + def invoke_llm( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + tools: list[PromptMessageTool] | None, + stop: Sequence[str] | None, + stream: Literal[True], + ) -> Generator[LLMResultChunk, None, None]: ... 
+ def invoke_llm( self, *, @@ -206,9 +305,9 @@ class PluginModelRuntime(ModelRuntime): tools: list[PromptMessageTool] | None, stop: Sequence[str] | None, stream: bool, - ) -> Union[LLMResult, Generator[LLMResultChunk, None, None]]: + ) -> LLMResult | Generator[LLMResultChunk, None, None]: plugin_id, provider_name = self._split_provider(provider) - return self.client.invoke_llm( + result = self.client.invoke_llm( tenant_id=self.tenant_id, user_id=self.user_id, plugin_id=plugin_id, @@ -221,6 +320,81 @@ class PluginModelRuntime(ModelRuntime): stop=list(stop) if stop else None, stream=stream, ) + if stream: + return result + + return normalize_non_stream_runtime_result( + model=model, + prompt_messages=prompt_messages, + result=result, + ) + + @overload + def invoke_llm_with_structured_output( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + json_schema: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + stream: Literal[False], + ) -> LLMResultWithStructuredOutput: ... + + @overload + def invoke_llm_with_structured_output( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + json_schema: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + stream: Literal[True], + ) -> Generator[LLMResultChunkWithStructuredOutput, None, None]: ... 
+ + def invoke_llm_with_structured_output( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + json_schema: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + stream: bool, + ) -> LLMResultWithStructuredOutput | Generator[LLMResultChunkWithStructuredOutput, None, None]: + model_schema = self.get_model_schema( + provider=provider, + model_type=ModelType.LLM, + model=model, + credentials=credentials, + ) + if model_schema is None: + raise ValueError(f"Model schema not found for {model}") + + adapter = _PluginStructuredOutputModelInstance( + runtime=self, + provider=provider, + model=model, + credentials=credentials, + ) + return invoke_llm_with_structured_output_helper( + provider=provider, + model_schema=model_schema, + model_instance=cast(Any, adapter), + prompt_messages=prompt_messages, + json_schema=json_schema, + model_parameters=model_parameters, + tools=None, + stop=list(stop) if stop else None, + stream=stream, + ) def get_llm_num_tokens( self, diff --git a/api/core/plugin/impl/model_runtime_factory.py b/api/core/plugin/impl/model_runtime_factory.py index 35abd2ae8c..fbe307ea60 100644 --- a/api/core/plugin/impl/model_runtime_factory.py +++ b/api/core/plugin/impl/model_runtime_factory.py @@ -3,13 +3,46 @@ from __future__ import annotations from typing import TYPE_CHECKING from core.plugin.impl.model import PluginModelClient +from graphon.model_runtime.entities.model_entities import ModelType +from graphon.model_runtime.entities.provider_entities import ProviderEntity +from graphon.model_runtime.model_providers.base.ai_model import AIModel +from graphon.model_runtime.model_providers.base.large_language_model import LargeLanguageModel +from graphon.model_runtime.model_providers.base.moderation_model import ModerationModel +from graphon.model_runtime.model_providers.base.rerank_model import RerankModel +from 
graphon.model_runtime.model_providers.base.speech2text_model import Speech2TextModel +from graphon.model_runtime.model_providers.base.text_embedding_model import TextEmbeddingModel +from graphon.model_runtime.model_providers.base.tts_model import TTSModel from graphon.model_runtime.model_providers.model_provider_factory import ModelProviderFactory +from graphon.model_runtime.protocols.runtime import ModelRuntime if TYPE_CHECKING: from core.model_manager import ModelManager from core.plugin.impl.model_runtime import PluginModelRuntime from core.provider_manager import ProviderManager +_MODEL_CLASS_BY_TYPE: dict[ModelType, type[AIModel]] = { + ModelType.LLM: LargeLanguageModel, + ModelType.TEXT_EMBEDDING: TextEmbeddingModel, + ModelType.RERANK: RerankModel, + ModelType.SPEECH2TEXT: Speech2TextModel, + ModelType.MODERATION: ModerationModel, + ModelType.TTS: TTSModel, +} + + +def create_model_type_instance( + *, + runtime: ModelRuntime, + provider_schema: ProviderEntity, + model_type: ModelType, +) -> AIModel: + """Build the graphon model wrapper explicitly against the request runtime.""" + model_class = _MODEL_CLASS_BY_TYPE.get(model_type) + if model_class is None: + raise ValueError(f"Unsupported model type: {model_type}") + + return model_class(provider_schema=provider_schema, model_runtime=runtime) + class PluginModelAssembly: """Compose request-scoped model views on top of a single plugin runtime.""" @@ -38,9 +71,22 @@ class PluginModelAssembly: @property def model_provider_factory(self) -> ModelProviderFactory: if self._model_provider_factory is None: - self._model_provider_factory = ModelProviderFactory(model_runtime=self.model_runtime) + self._model_provider_factory = ModelProviderFactory(runtime=self.model_runtime) return self._model_provider_factory + def create_model_type_instance( + self, + *, + provider: str, + model_type: ModelType, + ) -> AIModel: + provider_schema = self.model_provider_factory.get_provider_schema(provider=provider) + return 
create_model_type_instance( + runtime=self.model_runtime, + provider_schema=provider_schema, + model_type=model_type, + ) + @property def provider_manager(self) -> ProviderManager: if self._provider_manager is None: diff --git a/api/core/prompt/utils/prompt_message_util.py b/api/core/prompt/utils/prompt_message_util.py index ba76eb0c4e..11414832e3 100644 --- a/api/core/prompt/utils/prompt_message_util.py +++ b/api/core/prompt/utils/prompt_message_util.py @@ -53,24 +53,27 @@ class PromptMessageUtil: files = [] if isinstance(prompt_message.content, list): for content in prompt_message.content: - if isinstance(content, TextPromptMessageContent): - text += content.data - elif isinstance(content, ImagePromptMessageContent): - files.append( - { - "type": "image", - "data": content.data[:10] + "...[TRUNCATED]..." + content.data[-10:], - "detail": content.detail.value, - } - ) - elif isinstance(content, AudioPromptMessageContent): - files.append( - { - "type": "audio", - "data": content.data[:10] + "...[TRUNCATED]..." + content.data[-10:], - "format": content.format, - } - ) + match content: + case TextPromptMessageContent(): + text += content.data + case ImagePromptMessageContent(): + files.append( + { + "type": "image", + "data": content.data[:10] + "...[TRUNCATED]..." + content.data[-10:], + "detail": content.detail.value, + } + ) + case AudioPromptMessageContent(): + files.append( + { + "type": "audio", + "data": content.data[:10] + "...[TRUNCATED]..." 
+ content.data[-10:], + "format": content.format, + } + ) + case _: + continue else: text = cast(str, prompt_message.content) diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index b290ae456e..9faa70a0b8 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -56,7 +56,7 @@ from models.provider_ids import ModelProviderID from services.feature_service import FeatureService if TYPE_CHECKING: - from graphon.model_runtime.runtime import ModelRuntime + from graphon.model_runtime.protocols.runtime import ModelRuntime _credentials_adapter: TypeAdapter[dict[str, Any]] = TypeAdapter(dict[str, Any]) @@ -165,7 +165,7 @@ class ProviderManager: ) # Get all provider entities - model_provider_factory = ModelProviderFactory(model_runtime=self._model_runtime) + model_provider_factory = ModelProviderFactory(runtime=self._model_runtime) provider_entities = model_provider_factory.get_providers() # Get All preferred provider types of the workspace @@ -362,7 +362,7 @@ class ProviderManager: if not default_model: return None - model_provider_factory = ModelProviderFactory(model_runtime=self._model_runtime) + model_provider_factory = ModelProviderFactory(runtime=self._model_runtime) provider_schema = model_provider_factory.get_provider_schema(provider=default_model.provider_name) return DefaultModelEntity( diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py index 5679466cbc..4c6e647335 100644 --- a/api/core/tools/utils/message_transformer.py +++ b/api/core/tools/utils/message_transformer.py @@ -23,36 +23,37 @@ _TOOL_FILE_URL_PATTERN = re.compile(r"(?:^|/+)files/tools/(?P[^/?# def safe_json_value(v): - if isinstance(v, datetime): - tz_name = "UTC" - if isinstance(current_user, Account) and current_user.timezone is not None: - tz_name = current_user.timezone - return v.astimezone(pytz.timezone(tz_name)).isoformat() - elif isinstance(v, date): - return v.isoformat() - elif isinstance(v, UUID): 
- return str(v) - elif isinstance(v, Decimal): - return float(v) - elif isinstance(v, bytes): - try: - return v.decode("utf-8") - except UnicodeDecodeError: - return v.hex() - elif isinstance(v, memoryview): - return v.tobytes().hex() - elif isinstance(v, np.integer): - return int(v) - elif isinstance(v, np.floating): - return float(v) - elif isinstance(v, np.ndarray): - return v.tolist() - elif isinstance(v, dict): - return safe_json_dict(v) - elif isinstance(v, list | tuple | set): - return [safe_json_value(i) for i in v] - else: - return v + match v: + case datetime(): + tz_name = "UTC" + if isinstance(current_user, Account) and current_user.timezone is not None: + tz_name = current_user.timezone + return v.astimezone(pytz.timezone(tz_name)).isoformat() + case date(): + return v.isoformat() + case UUID(): + return str(v) + case Decimal(): + return float(v) + case bytes(): + try: + return v.decode("utf-8") + except UnicodeDecodeError: + return v.hex() + case memoryview(): + return v.tobytes().hex() + case np.integer(): + return int(v) + case np.floating(): + return float(v) + case np.ndarray(): + return v.tolist() + case dict(): + return safe_json_dict(v) + case list() | tuple() | set(): + return [safe_json_value(i) for i in v] + case _: + return v def safe_json_dict(d: dict[str, Any]): diff --git a/api/core/workflow/node_factory.py b/api/core/workflow/node_factory.py index 895953a3c1..a306b1c9ac 100644 --- a/api/core/workflow/node_factory.py +++ b/api/core/workflow/node_factory.py @@ -374,11 +374,6 @@ class DifyNodeFactory(NodeFactory): # Re-validate using the resolved node class so workflow-local node schemas # stay explicit and constructors receive the concrete typed payload. 
resolved_node_data = self._validate_resolved_node_data(node_class, node_data) - config_for_node_init: BaseNodeData | dict[str, Any] - if isinstance(resolved_node_data, BaseNodeData): - config_for_node_init = resolved_node_data.model_dump(mode="python", by_alias=True) - else: - config_for_node_init = resolved_node_data node_type = node_data.type node_init_kwargs_factories: Mapping[NodeType, Callable[[], dict[str, object]]] = { BuiltinNodeTypes.CODE: lambda: { @@ -446,9 +441,10 @@ class DifyNodeFactory(NodeFactory): }, } node_init_kwargs = node_init_kwargs_factories.get(node_type, lambda: {})() + constructor_node_data = resolved_node_data.model_dump(mode="python", by_alias=True) return node_class( node_id=node_id, - config=config_for_node_init, + data=constructor_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, **node_init_kwargs, diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index 68a24e86b1..17d71668cb 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -35,7 +35,7 @@ class AgentNode(Node[AgentNodeData]): def __init__( self, node_id: str, - config: AgentNodeData, + data: AgentNodeData, *, graph_init_params: GraphInitParams, graph_runtime_state: GraphRuntimeState, @@ -46,7 +46,7 @@ class AgentNode(Node[AgentNodeData]): ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/nodes/datasource/datasource_node.py b/api/core/workflow/nodes/datasource/datasource_node.py index f3006c4242..a4ef3d1ea7 100644 --- a/api/core/workflow/nodes/datasource/datasource_node.py +++ b/api/core/workflow/nodes/datasource/datasource_node.py @@ -36,14 +36,14 @@ class DatasourceNode(Node[DatasourceNodeData]): def __init__( self, node_id: str, - config: DatasourceNodeData, + data: DatasourceNodeData, 
*, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py index 9c1b7ab2c4..1d60f530a1 100644 --- a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py +++ b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py @@ -32,14 +32,14 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): def __init__( self, node_id: str, - config: KnowledgeIndexNodeData, + data: KnowledgeIndexNodeData, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index 25f73e446d..1aba2737b0 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -71,14 +71,14 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeD def __init__( self, node_id: str, - config: KnowledgeRetrievalNodeData, + data: KnowledgeRetrievalNodeData, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/system_variables.py b/api/core/workflow/system_variables.py index 9d15a3fcea..77ef3826e9 100644 --- a/api/core/workflow/system_variables.py +++ b/api/core/workflow/system_variables.py @@ -3,7 +3,7 
@@ from __future__ import annotations from collections import defaultdict from collections.abc import Mapping, Sequence from enum import StrEnum -from typing import Any, Protocol, cast +from typing import Any, Protocol from uuid import uuid4 from graphon.enums import BuiltinNodeTypes @@ -82,13 +82,10 @@ def build_system_variables(values: Mapping[str, Any] | None = None, /, **kwargs: normalized = _normalize_system_variable_values(values, **kwargs) return [ - cast( - Variable, - segment_to_variable( - segment=build_segment(value), - selector=system_variable_selector(key), - name=key, - ), + segment_to_variable( + segment=build_segment(value), + selector=system_variable_selector(key), + name=key, ) for key, value in normalized.items() ] @@ -130,13 +127,10 @@ def build_bootstrap_variables( for node_id, value in rag_pipeline_variables_map.items(): variables.append( - cast( - Variable, - segment_to_variable( - segment=build_segment(value), - selector=(RAG_PIPELINE_VARIABLE_NODE_ID, node_id), - name=node_id, - ), + segment_to_variable( + segment=build_segment(value), + selector=(RAG_PIPELINE_VARIABLE_NODE_ID, node_id), + name=node_id, ) ) diff --git a/api/core/workflow/workflow_entry.py b/api/core/workflow/workflow_entry.py index 4e2f603e5b..3019704dac 100644 --- a/api/core/workflow/workflow_entry.py +++ b/api/core/workflow/workflow_entry.py @@ -46,6 +46,11 @@ _file_access_controller = DatabaseFileAccessController() class _WorkflowChildEngineBuilder: + tenant_id: str + + def __init__(self, *, tenant_id: str) -> None: + self.tenant_id = tenant_id + @staticmethod def _has_node_id(graph_config: Mapping[str, Any], node_id: str) -> bool | None: """ @@ -107,7 +112,7 @@ class _WorkflowChildEngineBuilder: config=config, child_engine_builder=self, ) - child_engine.layer(LLMQuotaLayer()) + child_engine.layer(LLMQuotaLayer(tenant_id=self.tenant_id)) return child_engine @@ -176,7 +181,7 @@ class WorkflowEntry: self.command_channel = command_channel execution_context = 
capture_current_context() graph_runtime_state.execution_context = execution_context - self._child_engine_builder = _WorkflowChildEngineBuilder() + self._child_engine_builder = _WorkflowChildEngineBuilder(tenant_id=tenant_id) self.graph_engine = GraphEngine( workflow_id=workflow_id, graph=graph, @@ -208,7 +213,7 @@ class WorkflowEntry: max_steps=dify_config.WORKFLOW_MAX_EXECUTION_STEPS, max_time=dify_config.WORKFLOW_MAX_EXECUTION_TIME ) self.graph_engine.layer(limits_layer) - self.graph_engine.layer(LLMQuotaLayer()) + self.graph_engine.layer(LLMQuotaLayer(tenant_id=tenant_id)) # Add observability layer when OTel is enabled if dify_config.ENABLE_OTEL or is_instrument_flag_enabled(): diff --git a/api/dev/generate_fastopenapi_specs.py b/api/dev/generate_fastopenapi_specs.py new file mode 100644 index 0000000000..5a94d32b93 --- /dev/null +++ b/api/dev/generate_fastopenapi_specs.py @@ -0,0 +1,95 @@ +"""Generate FastOpenAPI OpenAPI 3.0 specs without booting the full backend.""" + +from __future__ import annotations + +import argparse +import json +import logging +import sys +from dataclasses import dataclass +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_swagger_specs import apply_runtime_defaults, drop_null_values, sort_openapi_arrays + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class FastOpenApiSpecTarget: + route: str + filename: str + + +FASTOPENAPI_SPEC_TARGETS: tuple[FastOpenApiSpecTarget, ...] 
= ( + FastOpenApiSpecTarget(route="/fastopenapi/openapi.json", filename="fastopenapi-console-openapi.json"), +) + + +def create_fastopenapi_spec_app(): + """Build a minimal Flask app that only mounts FastOpenAPI docs routes.""" + + apply_runtime_defaults() + + from app_factory import create_flask_app_with_configs + from extensions import ext_fastopenapi + + app = create_flask_app_with_configs() + ext_fastopenapi.init_app(app) + return app + + +def generate_fastopenapi_specs(output_dir: Path) -> list[Path]: + """Write FastOpenAPI specs to `output_dir` and return the written paths.""" + + output_dir.mkdir(parents=True, exist_ok=True) + + app = create_fastopenapi_spec_app() + client = app.test_client() + + written_paths: list[Path] = [] + for target in FASTOPENAPI_SPEC_TARGETS: + response = client.get(target.route) + if response.status_code != 200: + raise RuntimeError(f"failed to fetch {target.route}: {response.status_code}") + + payload = response.get_json() + if not isinstance(payload, dict): + raise RuntimeError(f"unexpected response payload for {target.route}") + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) + + output_path = output_dir / target.filename + output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") + written_paths.append(output_path) + + return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "-o", + "--output-dir", + type=Path, + default=Path("openapi"), + help="Directory where the OpenAPI JSON files will be written.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_fastopenapi_specs(args.output_dir) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/api/dev/generate_swagger_markdown_docs.py b/api/dev/generate_swagger_markdown_docs.py new file mode 
100644 index 0000000000..0900d08331 --- /dev/null +++ b/api/dev/generate_swagger_markdown_docs.py @@ -0,0 +1,161 @@ +"""Generate OpenAPI JSON specs and split Markdown API docs. + +The Markdown step uses `swagger-markdown`, the same converter family as the +Swagger Markdown UI, so CI and local regeneration catch converter-incompatible +OpenAPI output early. +""" + +from __future__ import annotations + +import argparse +import logging +import subprocess +import sys +import tempfile +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_fastopenapi_specs import FASTOPENAPI_SPEC_TARGETS, generate_fastopenapi_specs +from dev.generate_swagger_specs import SPEC_TARGETS, generate_specs + +logger = logging.getLogger(__name__) + +SWAGGER_MARKDOWN_PACKAGE = "swagger-markdown@3.0.0" +CONSOLE_SWAGGER_FILENAME = "console-swagger.json" +STALE_COMBINED_MARKDOWN_FILENAME = "api-reference.md" + + +def _convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + subprocess.run( + [ + "npx", + "--yes", + SWAGGER_MARKDOWN_PACKAGE, + "-i", + str(spec_path), + "-o", + str(markdown_path), + ], + check=True, + ) + + +def _demote_markdown_headings(markdown: str, *, levels: int = 1) -> str: + """Nest generated Markdown under another Markdown section.""" + + heading_prefix = "#" * levels + lines = [] + for line in markdown.splitlines(): + if line.startswith("#"): + lines.append(f"{heading_prefix}{line}") + else: + lines.append(line) + return "\n".join(lines).strip() + + +def _append_fastopenapi_markdown(console_markdown_path: Path, fastopenapi_markdown_path: Path) -> None: + """Append FastOpenAPI console docs to the existing console API Markdown.""" + + console_markdown = console_markdown_path.read_text(encoding="utf-8").rstrip() + fastopenapi_markdown = _demote_markdown_headings( + fastopenapi_markdown_path.read_text(encoding="utf-8"), + levels=2, + ) + 
console_markdown_path.write_text( + "\n\n".join( + [ + console_markdown, + "## FastOpenAPI Preview (OpenAPI 3.0)", + fastopenapi_markdown, + ] + ) + + "\n", + encoding="utf-8", + ) + + +def generate_markdown_docs( + swagger_dir: Path, + markdown_dir: Path, + *, + keep_swagger_json: bool = False, +) -> list[Path]: + """Generate intermediate specs, convert them to split Markdown API docs, and return Markdown paths.""" + + swagger_paths = generate_specs(swagger_dir) + fastopenapi_paths = generate_fastopenapi_specs(swagger_dir) + spec_paths = [*swagger_paths, *fastopenapi_paths] + swagger_paths_by_name = {path.name: path for path in swagger_paths} + fastopenapi_paths_by_name = {path.name: path for path in fastopenapi_paths} + + markdown_dir.mkdir(parents=True, exist_ok=True) + + written_paths: list[Path] = [] + try: + with tempfile.TemporaryDirectory(prefix="dify-api-docs-") as temp_dir: + temp_markdown_dir = Path(temp_dir) + + for target in SPEC_TARGETS: + swagger_path = swagger_paths_by_name[target.filename] + markdown_path = markdown_dir / f"{swagger_path.stem}.md" + _convert_spec_to_markdown(swagger_path, markdown_path) + written_paths.append(markdown_path) + + for target in FASTOPENAPI_SPEC_TARGETS: # type: ignore + fastopenapi_path = fastopenapi_paths_by_name[target.filename] + markdown_path = temp_markdown_dir / f"{fastopenapi_path.stem}.md" + _convert_spec_to_markdown(fastopenapi_path, markdown_path) + + console_markdown_path = markdown_dir / f"{Path(CONSOLE_SWAGGER_FILENAME).stem}.md" + _append_fastopenapi_markdown(console_markdown_path, markdown_path) + + (markdown_dir / STALE_COMBINED_MARKDOWN_FILENAME).unlink(missing_ok=True) + finally: + if not keep_swagger_json: + for path in spec_paths: + path.unlink(missing_ok=True) + + return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--swagger-dir", + type=Path, + default=Path("openapi"), + help="Directory where 
intermediate JSON spec files will be written.", + ) + parser.add_argument( + "--markdown-dir", + type=Path, + default=Path("openapi/markdown"), + help="Directory where split Markdown API docs will be written.", + ) + parser.add_argument( + "--keep-swagger-json", + action="store_true", + help="Keep intermediate JSON spec files after Markdown generation.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_markdown_docs( + args.swagger_dir, + args.markdown_dir, + keep_swagger_json=args.keep_swagger_json, + ) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/api/dev/generate_swagger_specs.py b/api/dev/generate_swagger_specs.py index 7e9688bfb4..9122f3ab24 100644 --- a/api/dev/generate_swagger_specs.py +++ b/api/dev/generate_swagger_specs.py @@ -9,12 +9,15 @@ which is unnecessary when the goal is only to serialize the Flask-RESTX from __future__ import annotations import argparse +import hashlib import json import logging import os import sys +from collections.abc import MutableMapping from dataclasses import dataclass from pathlib import Path +from typing import Protocol, TypeGuard from flask import Flask from flask_restx.swagger import Swagger @@ -30,19 +33,110 @@ if str(API_ROOT) not in sys.path: class SpecTarget: route: str filename: str + namespace: str + + +class RestxApi(Protocol): + models: MutableMapping[str, object] + + def model(self, name: str, model: dict[object, object]) -> object: ... SPEC_TARGETS: tuple[SpecTarget, ...] 
= ( - SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json"), - SpecTarget(route="/api/swagger.json", filename="web-swagger.json"), - SpecTarget(route="/v1/swagger.json", filename="service-swagger.json"), + SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json", namespace="console"), + SpecTarget(route="/api/swagger.json", filename="web-swagger.json", namespace="web"), + SpecTarget(route="/v1/swagger.json", filename="service-swagger.json", namespace="service"), ) _ORIGINAL_REGISTER_MODEL = Swagger.register_model _ORIGINAL_REGISTER_FIELD = Swagger.register_field -def _apply_runtime_defaults() -> None: +def _is_inline_field_map(value: object) -> TypeGuard[dict[object, object]]: + """Return whether a nested field map is an anonymous inline mapping.""" + + from flask_restx.model import Model, OrderedModel + + return isinstance(value, dict) and not isinstance(value, (Model, OrderedModel)) + + +def _jsonable_schema_value(value: object) -> object: + """Return a deterministic JSON-serializable representation for schema fingerprints.""" + + if value is None or isinstance(value, str | int | float | bool): + return value + if isinstance(value, list | tuple): + return [_jsonable_schema_value(item) for item in value] + if isinstance(value, dict): + return {str(key): _jsonable_schema_value(item) for key, item in value.items()} + value_type = type(value) + return f"<{value_type.__module__}.{value_type.__qualname__}>" + + +def _field_signature(field: object) -> object: + """Build a stable signature for a Flask-RESTX field object.""" + + from flask_restx import fields + from flask_restx.model import instance + + field_instance = instance(field) + signature: dict[str, object] = { + "class": f"{field_instance.__class__.__module__}.{field_instance.__class__.__qualname__}" + } + + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + signature["nested"] = 
_inline_model_signature(nested) + else: + signature["nested"] = getattr( + nested, + "name", + f"<{type(nested).__module__}.{type(nested).__qualname__}>", + ) + elif hasattr(field_instance, "container"): + signature["container"] = _field_signature(field_instance.container) + else: + schema = getattr(field_instance, "__schema__", None) + if isinstance(schema, dict): + signature["schema"] = _jsonable_schema_value(schema) + + for attr_name in ( + "attribute", + "default", + "description", + "example", + "max", + "min", + "nullable", + "readonly", + "required", + "title", + ): + if hasattr(field_instance, attr_name): + signature[attr_name] = _jsonable_schema_value(getattr(field_instance, attr_name)) + + return signature + + +def _inline_model_signature(nested_fields: dict[object, object]) -> object: + """Build a stable signature for an anonymous inline model.""" + + return [ + (str(field_name), _field_signature(field)) + for field_name, field in sorted(nested_fields.items(), key=lambda item: str(item[0])) + ] + + +def _inline_model_name(nested_fields: dict[object, object]) -> str: + """Return a stable Swagger model name for an anonymous inline field map.""" + + signature = json.dumps(_inline_model_signature(nested_fields), sort_keys=True, separators=(",", ":")) + digest = hashlib.sha1(signature.encode("utf-8")).hexdigest()[:12] + return f"_AnonymousInlineModel_{digest}" + + +def apply_runtime_defaults() -> None: """Force the small config surface required for Swagger generation.""" os.environ.setdefault("SECRET_KEY", "spec-export") @@ -74,25 +168,26 @@ def _patch_swagger_for_inline_nested_dicts() -> None: anonymous_models = getattr(self, "_anonymous_inline_models", None) if anonymous_models is None: anonymous_models = {} - self._anonymous_inline_models = anonymous_models + self.__dict__["_anonymous_inline_models"] = anonymous_models anonymous_name = anonymous_models.get(id(nested_fields)) if anonymous_name is None: - anonymous_name = 
f"_AnonymousInlineModel{len(anonymous_models) + 1}" + anonymous_name = _inline_model_name(nested_fields) anonymous_models[id(nested_fields)] = anonymous_name - self.api.model(anonymous_name, nested_fields) + if anonymous_name not in self.api.models: + self.api.model(anonymous_name, nested_fields) return self.api.models[anonymous_name] def register_model_with_inline_dict_support(self: Swagger, model: object) -> dict[str, str]: - if isinstance(model, dict): + if _is_inline_field_map(model): model = get_or_create_inline_model(self, model) return _ORIGINAL_REGISTER_MODEL(self, model) def register_field_with_inline_dict_support(self: Swagger, field: object) -> None: nested = getattr(field, "nested", None) - if isinstance(nested, dict): + if _is_inline_field_map(nested): field.model = get_or_create_inline_model(self, nested) # type: ignore _ORIGINAL_REGISTER_FIELD(self, field) @@ -105,22 +200,169 @@ def _patch_swagger_for_inline_nested_dicts() -> None: def create_spec_app() -> Flask: """Build a minimal Flask app that only mounts the Swagger-producing blueprints.""" - _apply_runtime_defaults() + apply_runtime_defaults() _patch_swagger_for_inline_nested_dicts() app = Flask(__name__) from controllers.console import bp as console_bp + from controllers.console import console_ns from controllers.service_api import bp as service_api_bp + from controllers.service_api import service_api_ns from controllers.web import bp as web_bp + from controllers.web import web_ns app.register_blueprint(console_bp) app.register_blueprint(web_bp) app.register_blueprint(service_api_bp) + for namespace in (console_ns, web_ns, service_api_ns): + for api in namespace.apis: + _materialize_inline_model_definitions(api) + return app +def _registered_models(namespace: str) -> dict[str, object]: + """Return the Flask-RESTX models registered for a Swagger namespace.""" + + if namespace == "console": + from controllers.console import console_ns + + models = dict(console_ns.models) + for api in 
console_ns.apis: + models.update(api.models) + return models + if namespace == "web": + from controllers.web import web_ns + + models = dict(web_ns.models) + for api in web_ns.apis: + models.update(api.models) + return models + if namespace == "service": + from controllers.service_api import service_api_ns + + models = dict(service_api_ns.models) + for api in service_api_ns.apis: + models.update(api.models) + return models + + raise ValueError(f"unknown Swagger namespace: {namespace}") + + +def _materialize_inline_model_definitions(api: RestxApi) -> None: + """Convert inline `fields.Nested({...})` maps into named API models.""" + + from flask_restx import fields + from flask_restx.model import Model, OrderedModel, instance + + inline_models: dict[int, dict[object, object]] = {} + inline_model_names: dict[int, str] = {} + + def collect_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested) and id(nested) not in inline_models: + inline_models[id(nested)] = nested + for nested_field in nested.values(): + collect_field(nested_field) + + container = getattr(field_instance, "container", None) + if container is not None: + collect_field(container) + + for model in list(api.models.values()): + if isinstance(model, (Model, OrderedModel)): + for field in model.values(): + collect_field(field) + + for nested_fields in sorted(inline_models.values(), key=_inline_model_name): + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + + def model_name_for(nested_fields: dict[object, object]) -> str: + anonymous_name = inline_model_names.get(id(nested_fields)) + if anonymous_name is None: + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if 
anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + return anonymous_name + + def materialize_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + field_instance.model = api.models[model_name_for(nested)] # type: ignore[attr-defined] + + container = getattr(field_instance, "container", None) + if container is not None: + materialize_field(container) + + index = 0 + while index < len(api.models): + model = list(api.models.values())[index] + index += 1 + if isinstance(model, (Model, OrderedModel)): + for field in model.values(): + materialize_field(field) + + +def drop_null_values(value: object) -> object: + """Remove JSON null values that make the Markdown converter crash.""" + + if isinstance(value, dict): + return {key: drop_null_values(item) for key, item in value.items() if item is not None} + if isinstance(value, list): + return [drop_null_values(item) for item in value] + return value + + +def sort_openapi_arrays(value: object, *, parent_key: str | None = None) -> object: + """Sort order-insensitive Swagger arrays so generated Markdown is stable.""" + + if isinstance(value, dict): + return {key: sort_openapi_arrays(item, parent_key=key) for key, item in value.items()} + if not isinstance(value, list): + return value + + sorted_items = [sort_openapi_arrays(item, parent_key=parent_key) for item in value] + if parent_key == "parameters": + return sorted( + sorted_items, + key=lambda item: ( + item.get("in", "") if isinstance(item, dict) else "", + item.get("name", "") if isinstance(item, dict) else "", + json.dumps(item, sort_keys=True, default=str), + ), + ) + if parent_key in {"enum", "required", "schemes", "tags"}: + string_items = [item for item in sorted_items if isinstance(item, str)] + if len(string_items) == len(sorted_items): + return sorted(string_items) + return sorted_items + + 
+def _merge_registered_definitions(payload: dict[str, object], namespace: str) -> dict[str, object]: + """Include registered but route-indirect models in the exported Swagger definitions.""" + + definitions = payload.setdefault("definitions", {}) + if not isinstance(definitions, dict): + raise RuntimeError("unexpected Swagger definitions payload") + + for name, model in _registered_models(namespace).items(): + schema = getattr(model, "__schema__", None) + if isinstance(schema, dict): + definitions.setdefault(name, schema) + + return payload + + def generate_specs(output_dir: Path) -> list[Path]: """Write all Swagger specs to `output_dir` and return the written paths.""" @@ -138,6 +380,9 @@ def generate_specs(output_dir: Path) -> list[Path]: payload = response.get_json() if not isinstance(payload, dict): raise RuntimeError(f"unexpected response payload for {target.route}") + payload = _merge_registered_definitions(payload, target.namespace) + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) output_path = output_dir / target.filename output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") diff --git a/api/events/event_handlers/update_provider_when_message_created.py b/api/events/event_handlers/update_provider_when_message_created.py index 1d615f0f87..8dec5876a9 100644 --- a/api/events/event_handlers/update_provider_when_message_created.py +++ b/api/events/event_handlers/update_provider_when_message_created.py @@ -137,17 +137,13 @@ def handle(sender: Message, **kwargs): if used_quota is not None: match provider_configuration.system_configuration.current_quota_type: case ProviderQuotaType.TRIAL: - from services.credit_pool_service import CreditPoolService - - CreditPoolService.check_and_deduct_credits( + _deduct_credit_pool_quota_capped( tenant_id=tenant_id, credits_required=used_quota, pool_type="trial", ) case ProviderQuotaType.PAID: - from services.credit_pool_service import CreditPoolService - - 
CreditPoolService.check_and_deduct_credits( + _deduct_credit_pool_quota_capped( tenant_id=tenant_id, credits_required=used_quota, pool_type="paid", @@ -200,6 +196,26 @@ def handle(sender: Message, **kwargs): raise +def _deduct_credit_pool_quota_capped(*, tenant_id: str, credits_required: int, pool_type: str) -> None: + """Apply post-generation credit accounting without failing message persistence on quota exhaustion.""" + from services.credit_pool_service import CreditPoolService + + deducted_credits = CreditPoolService.deduct_credits_capped( + tenant_id=tenant_id, + credits_required=credits_required, + pool_type=pool_type, + ) + if deducted_credits < credits_required: + logger.warning( + "Credit pool exhausted during message-created accounting, " + "tenant_id=%s, pool_type=%s, credits_required=%s, credits_deducted=%s", + tenant_id, + pool_type, + credits_required, + deducted_credits, + ) + + def _calculate_quota_usage( *, message: Message, system_configuration: SystemConfiguration, model_name: str ) -> int | None: diff --git a/api/migrations/versions/2026_04_29_1200-a4f2d8c9b731_add_recommended_app_categories.py b/api/migrations/versions/2026_04_29_1200-a4f2d8c9b731_add_recommended_app_categories.py new file mode 100644 index 0000000000..eee58b6310 --- /dev/null +++ b/api/migrations/versions/2026_04_29_1200-a4f2d8c9b731_add_recommended_app_categories.py @@ -0,0 +1,26 @@ +"""add recommended app categories + +Revision ID: a4f2d8c9b731 +Revises: 227822d22895 +Create Date: 2026-04-29 12:00:00.000000 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "a4f2d8c9b731" +down_revision = "227822d22895" +branch_labels = None +depends_on = None + + +def upgrade(): + with op.batch_alter_table("recommended_apps", schema=None) as batch_op: + batch_op.add_column(sa.Column("categories", sa.JSON(), nullable=True)) + + +def downgrade(): + with op.batch_alter_table("recommended_apps", schema=None) as batch_op: + batch_op.drop_column("categories") diff --git a/api/models/model.py b/api/models/model.py index 25c330b062..f7f90465cf 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -878,6 +878,7 @@ class RecommendedApp(TypeBase): copyright: Mapped[str] = mapped_column(String(255), nullable=False) privacy_policy: Mapped[str] = mapped_column(String(255), nullable=False) category: Mapped[str] = mapped_column(String(255), nullable=False) + categories: Mapped[list[str] | None] = mapped_column(sa.JSON, nullable=True, default=None) custom_disclaimer: Mapped[str] = mapped_column(LongText, default="") position: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) is_listed: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=True) diff --git a/api/openapi/markdown/console-swagger.md b/api/openapi/markdown/console-swagger.md new file mode 100644 index 0000000000..f4897e93c5 --- /dev/null +++ b/api/openapi/markdown/console-swagger.md @@ -0,0 +1,14766 @@ +# Console API +Console management APIs for app configuration, monitoring, and administration + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## console +Console management API operations + +### /account/avatar + +#### GET +##### Description + +Get account avatar url + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountAvatarQuery](#accountavatarquery) | + +##### Responses + +| 
Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountAvatarPayload](#accountavatarpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailSendPayload](#changeemailsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/check-email-unique + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CheckEmailUniquePayload](#checkemailuniquepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/reset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailResetPayload](#changeemailresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailValidityPayload](#changeemailvaliditypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletePayload](#accountdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete/feedback + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletionFeedbackPayload](#accountdeletionfeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete/verify + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationStatusResponse](#educationstatusresponse) | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationActivatePayload](#educationactivatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education/autocomplete + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationAutocompleteQuery](#educationautocompletequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationAutocompleteResponse](#educationautocompleteresponse) | + +### /account/education/verify + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationVerifyResponse](#educationverifyresponse) | + +### /account/init + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | 
----------- | -------- | ------ | +| payload | body | | Yes | [AccountInitPayload](#accountinitpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/integrates + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountIntegrateListResponse](#accountintegratelistresponse) | + +### /account/interface-language + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInterfaceLanguagePayload](#accountinterfacelanguagepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/interface-theme + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInterfaceThemePayload](#accountinterfacethemepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountNamePayload](#accountnamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountPasswordPayload](#accountpasswordpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/profile + +#### GET +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/timezone + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountTimezonePayload](#accounttimezonepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /activate + +#### POST +##### Description + +Activate account with invitation token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivatePayload](#activatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Account activated successfully | [ActivationResponse](#activationresponse) | +| 400 | Already activated or invalid token | | + +### /activate/check + +#### GET +##### Description + +Check if activation token is valid + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivateCheckQuery](#activatecheckquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ActivationCheckResponse](#activationcheckresponse) | + +### /admin/batch_add_notification_accounts + +#### POST +##### Description + +Register target accounts for a notification by email address. JSON body: {"notification_id": "...", "user_email": ["a@example.com", ...]}. File upload: multipart/form-data with a 'file' field (CSV or TXT, one email per line) plus a 'notification_id' field. Emails that do not match any account are silently skipped. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Accounts added successfully | + +### /admin/delete-explore-banner/{banner_id} + +#### DELETE +##### Description + +Delete an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| banner_id | path | Banner ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Banner deleted successfully | + +### /admin/insert-explore-apps + +#### POST +##### Description + +Insert or update an app in the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreAppPayload](#insertexploreapppayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | App updated successfully | +| 201 | App inserted successfully | +| 404 | App not found | + +### /admin/insert-explore-apps/{app_id} + +#### DELETE +##### Description + +Remove an app from the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID to remove | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App removed successfully | + +### /admin/insert-explore-banner + +#### POST +##### Description + +Insert an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreBannerPayload](#insertexplorebannerpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Banner inserted successfully | + +### /admin/upsert_notification + +#### POST +##### Description + +Create or update an in-product notification. 
Supply notification_id to update an existing one; omit it to create a new one. Pass at least one language variant in contents (zh / en / jp). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpsertNotificationPayload](#upsertnotificationpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Notification upserted successfully | + +### /all-workspaces + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceListQuery](#workspacelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-based-extension + +#### GET +##### Description + +Get all API-based extensions for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionListResponse](#apibasedextensionlistresponse) | + +#### POST +##### Description + +Create a new API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Extension created successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-based-extension/{id} + +#### DELETE +##### Description + +Delete API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Extension deleted successfully | + +#### GET +##### Description + +Get 
API-based extension by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +#### POST +##### Description + +Update API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Extension updated successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-key-auth/data-source + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/binding + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiKeyAuthBindingPayload](#apikeyauthbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/{binding_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/prompt-templates + +#### GET +##### Description + +Get advanced prompt templates based on app mode and model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[AdvancedPromptTemplateQuery](#advancedprompttemplatequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Prompt templates retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps + +#### GET +##### Summary + +Get app list + +##### Description + +Get list of applications with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppListQuery](#applistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppPagination](#apppagination) | + +#### POST +##### Summary + +Create app + +##### Description + +Create a new application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAppPayload](#createapppayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App created successfully | [AppDetail](#appdetail) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppImportPayload](#appimportpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import completed | [Import](#import) | +| 202 | Import pending confirmation | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/imports/{app_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | 
Schema | +| ---- | ----------- | ------ | +| 200 | Dependencies checked | [CheckDependenciesResult](#checkdependenciesresult) | + +### /apps/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import confirmed | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/workflows/online-users + +#### POST +##### Description + +Get workflow online users + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowOnlineUsersPayload](#workflowonlineuserspayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id} + +#### DELETE +##### Summary + +Delete app + +##### Description + +Delete application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App deleted successfully | +| 403 | Insufficient permissions | + +#### GET +##### Summary + +Get app detail + +##### Description + +Get application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppDetailWithSite](#appdetailwithsite) | + +#### PUT +##### Summary + +Update app + +##### Description + +Update application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAppPayload](#updateapppayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App updated successfully | [AppDetailWithSite](#appdetailwithsite) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/advanced-chat/workflow-runs + +#### GET +##### Summary + +Get advanced chat app workflow run list + +##### Description + +Get advanced chat workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunListQuery](#workflowrunlistquery) | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [AdvancedChatWorkflowRunPagination](#advancedchatworkflowrunpagination) | + +### /apps/{app_id}/advanced-chat/workflow-runs/count + +#### GET +##### Summary + +Get advanced chat workflow runs count statistics + +##### Description + +Get advanced chat workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunCountQuery](#workflowruncountquery) | +| app_id | path | Application ID | Yes | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCount](#workflowruncount) | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST +##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application 
ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow for advanced chat application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AdvancedChatWorkflowRunPayload](#advancedchatworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow run started successfully | +| 400 | Invalid request parameters | +| 403 | Permission denied | + +### /apps/{app_id}/agent/logs + +#### GET +##### Summary + +Get agent logs + +##### Description + +Get agent execution logs for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AgentLogQuery](#agentlogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | 
Description | Schema | +| ---- | ----------- | ------ | +| 200 | Agent logs retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps/{app_id}/annotation-reply/{action} + +#### POST +##### Description + +Enable or disable annotation reply for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyPayload](#annotationreplypayload) | +| action | path | Action to perform (enable/disable) | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-reply/{action}/status/{job_id} + +#### GET +##### Description + +Get status of annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-setting + +#### GET +##### Description + +Get annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotation settings retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-settings/{annotation_setting_id} + +#### POST +##### Description + +Update annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationSettingUpdatePayload](#annotationsettingupdatepayload) | +| annotation_setting_id | path | Annotation setting ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Settings updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get annotations for an app with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationListQuery](#annotationlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotations retrieved successfully | +| 403 | Insufficient permissions | + +#### POST +##### Description + +Create a new annotation for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAnnotationPayload](#createannotationpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/batch-import + +#### POST +##### Description + +Batch import annotations from CSV file with rate limiting and security checks + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Batch import started successfully | +| 400 | No file uploaded or too many files | +| 403 | Insufficient permissions | +| 413 | File too large | +| 429 | Too many requests or concurrent imports | + +### /apps/{app_id}/annotations/batch-import-status/{job_id} + +#### GET +##### Description + +Get status of batch import job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations/count + +#### GET +##### Description + +Get count of message annotations for the app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation count retrieved successfully | [AnnotationCountResponse](#annotationcountresponse) | + +### /apps/{app_id}/annotations/export + +#### GET +##### Description + +Export all annotations for an app with CSV injection protection + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations exported successfully | [AnnotationExportList](#annotationexportlist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id} + +#### DELETE 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | | Yes | string | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Description + +Update or delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAnnotationPayload](#updateannotationpayload) | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 204 | Annotation deleted successfully | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id}/hit-histories + +#### GET +##### Description + +Get hit histories for an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | +| limit | query | Page size | No | integer | +| page | query | Page number | No | integer | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit histories retrieved successfully | [AnnotationHitHistoryList](#annotationhithistorylist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/api-enable + +#### POST +##### Description + +Enable or disable app API + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppApiStatusPayload](#appapistatuspayload) | +| app_id | path | Application ID | Yes | string | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/audio-to-text + +#### POST +##### Description + +Transcript audio to text for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Audio transcription successful | [AudioTranscriptResponse](#audiotranscriptresponse) | +| 400 | Bad request - No audio uploaded or unsupported type | | +| 413 | Audio file too large | | + +### /apps/{app_id}/chat-conversations + +#### GET +##### Description + +Get chat conversations with pagination, filtering and summary + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatConversationQuery](#chatconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationWithSummaryPagination](#conversationwithsummarypagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/chat-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a chat conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get chat conversation details 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationDetail](#conversationdetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages + +#### GET +##### Description + +Get chat messages for a conversation with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagesQuery](#chatmessagesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [MessageInfiniteScrollPaginationResponse](#messageinfinitescrollpaginationresponse) | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested questions for a message + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Suggested questions retrieved successfully | [SuggestedQuestionsResponse](#suggestedquestionsresponse) | +| 404 | Message or conversation not found | | + +### /apps/{app_id}/chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application 
ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/completion-conversations + +#### GET +##### Description + +Get completion conversations with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionConversationQuery](#completionconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationPagination](#conversationpagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/completion-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a completion conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get completion conversation details with messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationMessageDetail](#conversationmessagedetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/completion-messages + +#### POST +##### 
Description + +Generate completion message for debugging + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion generated successfully | +| 400 | Invalid request parameters | +| 404 | App not found | + +### /apps/{app_id}/completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/conversation-variables + +#### GET +##### Description + +Get conversation variables for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [PaginatedConversationVariableResponse](#paginatedconversationvariableresponse) | + +### /apps/{app_id}/convert-to-workflow + +#### POST +##### Summary + +Convert basic mode of chatbot app to workflow mode + +##### Description + +Convert application to workflow mode +Convert expert mode of chatbot app to workflow mode +Convert Completion App to Workflow App + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | 
----------- | -------- | ------ | +| payload | body | | Yes | [ConvertToWorkflowPayload](#converttoworkflowpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application converted to workflow successfully | +| 400 | Application cannot be converted | +| 403 | Permission denied | + +### /apps/{app_id}/copy + +#### POST +##### Summary + +Copy app + +##### Description + +Create a copy of an existing application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CopyAppPayload](#copyapppayload) | +| app_id | path | Application ID to copy | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App copied successfully | [AppDetailWithSite](#appdetailwithsite) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/export + +#### GET +##### Summary + +Export app + +##### Description + +Export application configuration as DSL + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppExportQuery](#appexportquery) | +| app_id | path | Application ID to export | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App exported successfully | [AppExportResponse](#appexportresponse) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/feedbacks + +#### POST +##### Description + +Create or update message feedback (like/dislike) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | 
----------- | +| 200 | Feedback updated successfully | +| 403 | Insufficient permissions | +| 404 | Message not found | + +### /apps/{app_id}/feedbacks/export + +#### GET +##### Description + +Export user feedback data for Google Sheets + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackExportQuery](#feedbackexportquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback data exported successfully | +| 400 | Invalid parameters | +| 500 | Internal server error | + +### /apps/{app_id}/icon + +#### POST +##### Description + +Update application icon + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppIconPayload](#appiconpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Icon updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/messages/{message_id} + +#### GET +##### Description + +Get message details by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Message retrieved successfully | [MessageDetailResponse](#messagedetailresponse) | +| 404 | Message not found | | + +### /apps/{app_id}/model-config + +#### POST +##### Summary + +Modify app model config + +##### Description + +Update application model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
payload | body | | Yes | [ModelConfigRequest](#modelconfigrequest) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Model configuration updated successfully | +| 400 | Invalid configuration | +| 404 | App not found | + +### /apps/{app_id}/name + +#### POST +##### Description + +Check if app name is available + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppNamePayload](#appnamepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Name availability checked | [AppDetail](#appdetail) | + +### /apps/{app_id}/publish-to-creators-platform + +#### POST +##### Summary + +Publish app to Creators Platform + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/server + +#### GET +##### Description + +Get MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration retrieved successfully | [AppMCPServerResponse](#appmcpserverresponse) | + +#### POST +##### Description + +Create MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerCreatePayload](#mcpservercreatepayload) | +| app_id | path | Application ID | 
Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | MCP server configuration created successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | + +#### PUT +##### Description + +Update MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerUpdatePayload](#mcpserverupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration updated successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /apps/{app_id}/site + +#### POST +##### Description + +Update application site configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteUpdatePayload](#appsiteupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site configuration updated successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions | | +| 404 | App not found | | + +### /apps/{app_id}/site-enable + +#### POST +##### Description + +Enable or disable app site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteStatusPayload](#appsitestatuspayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient 
permissions | | + +### /apps/{app_id}/site/access-token-reset + +#### POST +##### Description + +Reset access token for application site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Access token reset successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions (admin/owner required) | | +| 404 | App or site not found | | + +### /apps/{app_id}/statistics/average-response-time + +#### GET +##### Description + +Get average response time statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average response time statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/average-session-interactions + +#### GET +##### Description + +Get average session interaction statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average session interaction statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-conversations + +#### GET +##### Description + +Get daily conversation statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily conversation statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-end-users + +#### GET +##### Description + +Get daily terminal/end-user statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily terminal statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-messages + +#### GET +##### Description + +Get daily message statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily message statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/token-costs + +#### GET +##### Description + +Get daily token cost statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily token cost statistics 
retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/tokens-per-second + +#### GET +##### Description + +Get tokens per second statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tokens per second statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/user-satisfaction-rate + +#### GET +##### Description + +Get user satisfaction rate statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | User satisfaction rate statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/text-to-audio + +#### POST +##### Description + +Convert text to speech for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToSpeechPayload](#texttospeechpayload) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text to speech conversion successful | +| 400 | Bad request - Invalid parameters | + +### /apps/{app_id}/text-to-audio/voices + +#### GET +##### Description + +Get available TTS voices for a specific language + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | 
| Yes | [TextToSpeechVoiceQuery](#texttospeechvoicequery) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | TTS voices retrieved successfully | [ object ] | +| 400 | Invalid language parameter | | + +### /apps/{app_id}/trace + +#### GET +##### Summary + +Get app trace + +##### Description + +Get app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration retrieved successfully | + +#### POST +##### Description + +Update app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppTracePayload](#apptracepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/trace-config + +#### DELETE +##### Summary + +Delete an existing trace app configuration + +##### Description + +Delete an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tracing configuration deleted successfully | +| 400 | Invalid request parameters or configuration not found | + +#### GET +##### Description + +Get tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration retrieved successfully | object | +| 400 | Invalid request parameters | | + +#### PATCH +##### Summary + +Update an existing trace app configuration + +##### Description + +Update an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration updated successfully | object | +| 400 | Invalid request parameters or configuration not found | | + +#### POST +##### Summary + +Create a new trace app configuration + +##### Description + +Create a new tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Tracing configuration created successfully | object | +| 400 | Invalid request parameters or configuration already exists | | + +### /apps/{app_id}/trigger-enable + +#### POST +##### Summary + +Update app trigger (enable/disable) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ParserEnable](#parserenable) | + +##### Responses + +| 
Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerResponse](#workflowtriggerresponse) | + +### /apps/{app_id}/triggers + +#### GET +##### Summary + +Get app triggers list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerListResponse](#workflowtriggerlistresponse) | + +### /apps/{app_id}/workflow-app-logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow application execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow app logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | + +### /apps/{app_id}/workflow-archived-logs + +#### GET +##### Summary + +Get workflow archived logs + +##### Description + +Get workflow archived execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow archived logs retrieved successfully | [WorkflowArchivedLogPaginationResponse](#workflowarchivedlogpaginationresponse) | + +### /apps/{app_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Description + +Get workflow run list + +##### Parameters + +| 
Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunListQuery](#workflowrunlistquery) | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [WorkflowRunPagination](#workflowrunpagination) | + +### /apps/{app_id}/workflow-runs/count + +#### GET +##### Summary + +Get workflow runs count statistics + +##### Description + +Get workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunCountQuery](#workflowruncountquery) | +| app_id | path | Application ID | Yes | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCount](#workflowruncount) | + +### /apps/{app_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Description + +Stop running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 403 | Permission denied | +| 404 | Task not found | + +### /apps/{app_id}/workflow-runs/{run_id} + +#### GET +##### Summary + +Get workflow run detail + +##### Description + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run detail retrieved successfully | [WorkflowRunDetail](#workflowrundetail) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow-runs/{run_id}/export + +#### GET +##### Description + +Generate a download URL for an archived workflow run. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Export URL generated | [WorkflowRunExport](#workflowrunexport) | + +### /apps/{app_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Description + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node executions retrieved successfully | [WorkflowRunNodeExecutionList](#workflowrunnodeexecutionlist) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow/comments + +#### GET +##### Summary + +Get all comments for a workflow + +##### Description + +Get all comments for a workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comments retrieved successfully | [WorkflowCommentBasic](#workflowcommentbasic) | + +#### POST +##### Summary + +Create a new workflow comment + +##### Description + +Create a new workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentCreatePayload](#workflowcommentcreatepayload) | +| app_id | path | Application ID | Yes | string | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Comment created successfully | [WorkflowCommentCreate](#workflowcommentcreate) | + +### /apps/{app_id}/workflow/comments/mention-users + +#### GET +##### Summary + +Get all users in current tenant for mentions + +##### Description + +Get all users in current tenant for mentions + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Mentionable users retrieved successfully | [WorkflowCommentMentionUsersPayload](#workflowcommentmentionuserspayload) | + +### /apps/{app_id}/workflow/comments/{comment_id} + +#### DELETE +##### Summary + +Delete a workflow comment + +##### Description + +Delete a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Comment deleted successfully | + +#### GET +##### Summary + +Get a specific workflow comment + +##### Description + +Get a specific workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment retrieved successfully | [WorkflowCommentDetail](#workflowcommentdetail) | + +#### PUT +##### Summary + +Update a workflow comment + +##### Description + +Update a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentUpdatePayload](#workflowcommentupdatepayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment updated successfully | [WorkflowCommentUpdate](#workflowcommentupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies + +#### POST +##### Summary + +Add a reply to a workflow comment + +##### Description + +Add a reply to a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Reply created successfully | [WorkflowCommentReplyCreate](#workflowcommentreplycreate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies/{reply_id} + +#### DELETE +##### Summary + +Delete a comment reply + +##### Description + +Delete a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Reply deleted successfully | + +#### PUT +##### Summary + +Update a comment reply + +##### Description + +Update a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Reply updated successfully | [WorkflowCommentReplyUpdate](#workflowcommentreplyupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/resolve + +#### POST +##### Summary + +Resolve a workflow comment + +##### Description + +Resolve a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment resolved successfully | [WorkflowCommentResolve](#workflowcommentresolve) | + +### /apps/{app_id}/workflow/statistics/average-app-interactions + +#### GET +##### Description + +Get workflow average app interaction statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Average app interaction statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-conversations + +#### GET +##### Description + +Get workflow daily runs statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | 
----------- | +| 200 | Daily runs statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-terminals + +#### GET +##### Description + +Get workflow daily terminals statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily terminals statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/token-costs + +#### GET +##### Description + +Get workflow daily token cost statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily token cost statistics retrieved successfully | + +### /apps/{app_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Description + +Get all published workflows for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowListQuery](#workflowlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflows retrieved successfully | [WorkflowPagination](#workflowpagination) | + +### /apps/{app_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configurations for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- 
| ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configurations retrieved successfully | + +### /apps/{app_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configuration by type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DefaultBlockConfigQuery](#defaultblockconfigquery) | +| app_id | path | Application ID | Yes | string | +| block_type | path | Block type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configuration retrieved successfully | +| 404 | Block type not found | + +### /apps/{app_id}/workflows/draft + +#### GET +##### Summary + +Get draft workflow + +##### Description + +Get draft workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Draft workflow not found | | + +#### POST +##### Summary + +Sync draft workflow + +##### Description + +Sync draft workflow configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SyncDraftWorkflowPayload](#syncdraftworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow synced successfully | [SyncDraftWorkflowResponse](#syncdraftworkflowresponse) | 
+| 400 | Invalid workflow configuration | | +| 403 | Permission denied | | + +### /apps/{app_id}/workflows/draft/conversation-variables + +#### GET +##### Description + +Get conversation variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | +| 404 | Draft workflow not found | | + +#### POST +##### Description + +Update conversation variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation variables updated successfully | + +### /apps/{app_id}/workflows/draft/environment-variables + +#### GET +##### Summary + +Get draft workflow + +##### Description + +Get environment variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables retrieved successfully | +| 404 | Draft workflow not found | + +#### POST +##### Description + +Update environment variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EnvironmentVariableUpdatePayload](#environmentvariableupdatepayload) | +| app_id | path | 
Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables updated successfully | + +### /apps/{app_id}/workflows/draft/features + +#### POST +##### Description + +Update draft workflow features + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowFeaturesPayload](#workflowfeaturespayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow features updated successfully | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/delivery-test + +#### POST +##### Summary + +Test human input delivery + +##### Description + +Test human input delivery for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputDeliveryTestPayload](#humaninputdeliverytestpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST 
+##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Description + +Get last run result for 
draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node last run retrieved successfully | [WorkflowRunNodeExecution](#workflowrunnodeexecution) | +| 403 | Permission denied | | +| 404 | Node last run not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Description + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowNodeRunPayload](#draftworkflownoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node run started successfully | [WorkflowRunNodeExecution](#workflowrunnodeexecution) | +| 403 | Permission denied | | +| 404 | Node not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/trigger/run + +#### POST +##### Summary + +Poll for trigger events and execute single node when event arrives + +##### Description + +Poll for trigger events and execute single node when event arrives + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trigger event received and node executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE 
+##### Description + +Delete all variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Node variables deleted successfully | + +#### GET +##### Description + +Get variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Draft workflow run started successfully | +| 403 | Permission denied | + +### /apps/{app_id}/workflows/draft/system-variables + +#### GET +##### Description + +Get system variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | System variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/trigger/run + +#### 
POST +##### Summary + +Poll for trigger events and execute full workflow when event arrives + +##### Description + +Poll for trigger events and execute full workflow when event arrives + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowTriggerRunRequest](#draftworkflowtriggerrunrequest) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trigger event received and workflow executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/trigger/run-all + +#### POST +##### Summary + +Full workflow debug when the start node is a trigger + +##### Description + +Full workflow debug when the start node is a trigger + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowTriggerRunAllPayload](#draftworkflowtriggerrunallpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/variables + +#### DELETE +##### Description + +Delete all draft workflow variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Workflow variables deleted successfully | + +#### GET +##### Summary + +Get draft workflow + +##### Description + +Get draft workflow variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
payload | body | | Yes | [WorkflowDraftVariableListQuery](#workflowdraftvariablelistquery) | +| app_id | path | Application ID | Yes | string | +| limit | query | Number of items per page (1-100) | No | string | +| page | query | Page number (1-100000) | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow variables retrieved successfully | [WorkflowDraftVariableListWithoutValue](#workflowdraftvariablelistwithoutvalue) | + +### /apps/{app_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Description + +Delete a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Variable deleted successfully | +| 404 | Variable not found | + +#### GET +##### Description + +Get a specific workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable retrieved successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +#### PATCH +##### Description + +Update a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowDraftVariableUpdatePayload](#workflowdraftvariableupdatepayload) | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | 
[WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Description + +Reset a workflow variable to its default value + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable reset successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 204 | Variable reset (no content) | | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/publish + +#### GET +##### Summary + +Get published workflow + +##### Description + +Get published workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Published workflow not found | | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PublishWorkflowPayload](#publishworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/triggers/webhook + +#### GET +##### Summary + +Get webhook trigger for a node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WebhookTriggerResponse](#webhooktriggerresponse) | + +### /apps/{app_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Description + +Update workflow by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowUpdatePayload](#workflowupdatepayload) | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Workflow ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow updated successfully | [Workflow](#workflow) | +| 403 | Permission denied | | +| 404 | Workflow not found | | + +### /apps/{app_id}/workflows/{workflow_id}/restore + +#### POST +##### Description + +Restore a published workflow version into the draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Published workflow ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow restored successfully | +| 400 | Source workflow must be published | +| 404 | Workflow not found | + +### /apps/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for an app + +##### Description + +Get all API keys for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for an app + +##### Description + +Create a new API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /apps/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for an app + +##### Description + +Delete an API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### /apps/{server_id}/server/refresh + +#### GET +##### Description + +Refresh MCP server configuration and regenerate server code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| server_id | path | Server ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server refreshed successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /auth/plugin/datasource/default-list + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/auth/plugin/datasource/list + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialPayload](#datasourcecredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCustomClientPayload](#datasourcecustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/default + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceDefaultPayload](#datasourcedefaultpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/delete + +#### POST 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialDeletePayload](#datasourcecredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialUpdatePayload](#datasourcecredentialupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update-name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceUpdateNamePayload](#datasourceupdatenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/invoices + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/partners/{partner_key}/tenants + +#### PUT +##### Description + +Sync partner tenants bindings + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PartnerTenantsPayload](#partnertenantspayload) | +| partner_key | path | Partner key | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tenants synced to partner successfully | +| 400 | Invalid partner information | + +### /billing/subscription + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /code-based-extension + +#### GET +##### Description + +Get code-based extension data by module name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| module | query | Extension module name | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [CodeBasedExtensionResponse](#codebasedextensionresponse) | + +### /compliance/download + +#### GET +##### Description + +Get compliance document download link + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ComplianceDownloadQuery](#compliancedownloadquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates/{binding_id}/{action} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets + +#### GET +##### Description + +Get list of datasets + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
ids | query | Filter by dataset IDs (list) | No | string | +| include_all | query | Include all datasets (default: false) | No | string | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| tag_ids | query | Filter by tag IDs (list) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | + +#### POST +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Dataset created successfully | +| 400 | Invalid request parameters | + +### /datasets/api-base-info + +#### GET +##### Description + +Get dataset API base information + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | API base info retrieved successfully | + +### /datasets/api-keys + +#### GET +##### Description + +Get dataset API keys + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/api-keys/{api_key_id} + +#### DELETE +##### Description + +Delete dataset API key + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### 
/datasets/batch_import_status/{job_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external + +#### POST +##### Description + +Create external knowledge dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalDatasetCreatePayload](#externaldatasetcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | External dataset created successfully | [DatasetDetail](#datasetdetail) | +| 400 | Invalid parameters | | +| 403 | Permission denied | | + +### /datasets/external-knowledge-api + +#### GET +##### Description + +Get external knowledge API templates + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API templates retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get external knowledge API template details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API template retrieved successfully | +| 404 | Template not found | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id}/use-check + +#### GET +##### Description + +Check if external knowledge API is being used + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Usage check completed successfully | + +### /datasets/indexing-estimate + +#### POST +##### Description + +Estimate dataset indexing cost + +##### Parameters + +| Name | Located in | Description | Required 
| Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IndexingEstimatePayload](#indexingestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | + +### /datasets/init + +#### POST +##### Description + +Initialize dataset with documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Dataset initialized successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | +| 400 | Invalid request parameters | | + +### /datasets/metadata/built-in + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/notion-indexing-estimate + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/process-rule + +#### GET +##### Description + +Get dataset document processing rules + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| document_id | query | Document ID (optional) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Process rules retrieved successfully | + +### /datasets/retrieval-setting + +#### GET +##### Description + +Get dataset retrieval settings + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Retrieval settings 
retrieved successfully | + +### /datasets/retrieval-setting/{vector_type} + +#### GET +##### Description + +Get mock dataset retrieval settings by vector type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| vector_type | path | Vector store type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Mock retrieval settings retrieved successfully | + +### /datasets/{dataset_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset retrieved successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +#### PATCH +##### Description + +Update dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset updated successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/api-keys/{status} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | 
path | | Yes | string | +| status | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/auto-disable-logs + +#### GET +##### Description + +Get dataset auto disable logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Auto disable logs retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/batch/{batch}/indexing-estimate + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/batch/{batch}/indexing-status + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| fetch | query | Fetch full details (default: false) | No | string | +| keyword | query | 
Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| sort | query | Sort order (default: -created_at) | No | string | +| status | query | Filter documents by display status | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Documents created successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Summary + +Stream a ZIP archive containing the requested uploaded documents + +##### Description + +Download selected dataset documents as a single ZIP archive (upload-file only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/generate-summary + +#### POST +##### Summary + +Generate summary index for specified documents + +##### Description + +Generate summary index for documents +This endpoint checks if the dataset configuration supports summary generation +(indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), +then asynchronously generates summary indexes for the provided documents. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [GenerateSummaryPayload](#generatesummarypayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary generation started successfully | +| 400 | Invalid request or dataset configuration | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataOperationData](#metadataoperationdata) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/status/{action}/batch + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get document details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| metadata | query | Metadata inclusion (all/only/without) 
| No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a dataset document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-estimate + +#### GET +##### Description + +Estimate document indexing cost + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | +| 400 | Document already finished | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-status + +#### GET +##### Description + +Get document indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/metadata + +#### PUT +##### Description + +Update document metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body 
| | Yes | [DocumentMetadataUpdatePayload](#documentmetadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document metadata updated successfully | +| 403 | Permission denied | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/pipeline-execution-log + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/pause + +#### PATCH +##### Summary + +pause document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/resume + +#### PATCH +##### Summary + +recover document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/datasets/{dataset_id}/documents/{document_id}/processing/{action} + +#### PATCH +##### Description + +Update document processing status (pause/resume) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform (pause/resume) | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Processing status updated successfully | +| 400 | Invalid action | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/rename + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRenamePayload](#documentrenamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Document renamed successfully | [DocumentResponse](#documentresponse) | + +### /datasets/{dataset_id}/documents/{document_id}/segment + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segment/{action} + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/batch_import + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path 
| | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/summary-status + +#### GET +##### Summary + +Get summary index generation status for a document + +##### Description + +Get summary index generation status for a document +Returns: +- total_segments: Total number of segments in the document +- summary_status: Dictionary with status counts + - completed: Number of summaries completed + - generating: Number of summaries being generated + - error: Number of summaries with errors + - not_started: Number of segments without summary records +- summaries: List of summary records with status and content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/website-sync + +#### GET +##### Summary + +sync website document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/error-docs + +#### GET +##### Description + +Get dataset error documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Error documents retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/external-hit-testing + +#### POST +##### Description + +Test external knowledge retrieval for dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalHitTestingPayload](#externalhittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External hit testing completed successfully | +| 400 | Invalid parameters | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Description + +Test dataset knowledge retrieval + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit testing completed successfully | [HitTestingResponse](#hittestingresponse) | +| 400 | Invalid parameters | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/indexing-status + +#### GET +##### Description + +Get dataset indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/permission-part-users + +#### GET +##### Description + +Get dataset permission user list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Permission users retrieved successfully | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/queries + +#### GET +##### Description + +Get dataset query history + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Query history retrieved successfully | [DatasetQueryDetail](#datasetquerydetail) | + +### /datasets/{dataset_id}/related-apps + +#### GET +##### Description + +Get applications related to dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Related apps retrieved successfully | [RelatedAppList](#relatedapplist) | + +### /datasets/{dataset_id}/retry + +#### POST +##### Summary + +retry document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRetryPayload](#documentretrypayload) | + +##### Responses + +| Code 
| Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/use-check + +#### GET +##### Description + +Check if dataset is in use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset use status retrieved successfully | + +### /datasets/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for a dataset + +##### Description + +Get all API keys for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for a dataset + +##### Description + +Create a new API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for a dataset + +##### Description + +Delete an API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + 
+### /email-code-login + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-code-login/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginPayload](#emailcodeloginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/send-email + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/validity + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /explore/apps + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| language | query | Language code for recommended app localization | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [RecommendedAppListResponse](#recommendedapplistresponse) | + +### /explore/apps/{app_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /features + +#### GET +##### Summary + +Get feature configuration for current tenant + +##### Description + +Get feature configuration for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | 
----------- | ------ | +| 200 | Success | [FeatureResponse](#featureresponse) | + +### /files/support-type + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /files/upload + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [UploadConfig](#uploadconfig) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | + +### /files/{file_id}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| file_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Email sent successfully | [ForgotPasswordEmailResponse](#forgotpasswordemailresponse) | +| 400 | Invalid email or rate limit exceeded | | + +### /forgot-password/resets + +#### POST +##### Description + +Reset password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Password reset successfully | [ForgotPasswordResetResponse](#forgotpasswordresetresponse) | +| 400 | Invalid token or password mismatch | | + +### /forgot-password/validity + +#### POST 
+##### Description + +Verify password reset code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Code verified successfully | [ForgotPasswordCheckResponse](#forgotpasswordcheckresponse) | +| 400 | Invalid code or token | | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by form token + +##### Description + +GET /console/api/form/human_input/ + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by form token + +##### Description + +POST /console/api/form/human_input/ + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /info + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /installed-apps + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [InstalledAppListResponse](#installedapplistresponse) | + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id} + +#### DELETE +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionMessageExplorePayload](#completionmessageexplorepayload) | + 
+##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/pin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| 
installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/unpin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/feedbacks + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/more-like-this + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MoreLikeThisQuery](#morelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/meta + +#### GET +##### Summary + +Get app meta + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageListQuery](#savedmessagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageCreatePayload](#savedmessagecreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages/{message_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | 
------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /instruction-generate + +#### POST +##### Description + +Generate instruction for workflow nodes or general use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionGeneratePayload](#instructiongeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Instruction generated successfully | +| 400 | Invalid request parameters or flow/workflow not found | +| 402 | 
Provider quota exceeded | + +### /instruction-generate/template + +#### POST +##### Description + +Get instruction generation template + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionTemplatePayload](#instructiontemplatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Template retrieved successfully | +| 400 | Invalid request parameters | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /logout + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /mcp/oauth/callback + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notification + +#### GET +##### Description + +Return the active in-product notification for the current user in their interface language (falls back to English if unavailable). The notification is NOT marked as seen here; call POST /notification/dismiss when the user explicitly closes the modal. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success — inspect should_show to decide whether to render the modal | +| 401 | Unauthorized | + +### /notification/dismiss + +#### POST +##### Description + +Mark a notification as dismissed for the current user. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 401 | Unauthorized | + +### /notion/pages/{page_id}/{page_type}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notion/pre-import/pages + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/authorize/{provider} + +#### GET +##### Description + +Handle OAuth callback and complete login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| state | query | Optional state parameter (used for invite token) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with access token | +| 400 | OAuth process failed | + +### /oauth/data-source/binding/{provider} + +#### GET +##### Description + +Bind OAuth data source with authorization code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code 
from OAuth provider | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source binding success | [OAuthDataSourceBindingResponse](#oauthdatasourcebindingresponse) | +| 400 | Invalid provider or code | | + +### /oauth/data-source/callback/{provider} + +#### GET +##### Description + +Handle OAuth callback from data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| error | query | Error message from OAuth provider | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with result | +| 400 | Invalid provider | + +### /oauth/data-source/{provider} + +#### GET +##### Description + +Get OAuth authorization URL for data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Authorization URL or internal setup success | [OAuthDataSourceResponse](#oauthdatasourceresponse) | +| 400 | Invalid provider | | +| 403 | Admin privileges required | | + +### /oauth/data-source/{provider}/{binding_id}/sync + +#### GET +##### Description + +Sync data from OAuth data source + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | Data source binding ID | Yes | string | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source sync success 
| [OAuthDataSourceSyncResponse](#oauthdatasourcesyncresponse) | +| 400 | Invalid provider or sync failed | | + +### /oauth/login/{provider} + +#### GET +##### Description + +Initiate OAuth login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| invite_token | query | Optional invitation token | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to OAuth authorization URL | +| 400 | Invalid provider | + +### /oauth/plugin/{provider_id}/datasource/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider_id}/datasource/get-authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/trigger/callback + +#### 
GET +##### Summary + +Handle OAuth callback for trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/account + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/authorize + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/customized/templates/{template_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/dataset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineDatasetImportPayload](#ragpipelinedatasetimportpayload) | 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/empty-dataset + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates/{template_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/datasource-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineImportPayload](#ragpipelineimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{pipeline_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/recommended-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/rag/pipelines/transform/datasets/{dataset_id} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/customized/publish + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Payload](#payload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/exports + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id} + +#### GET +##### Summary + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| block_type | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft + +#### GET +##### Summary + +Get draft rag pipeline's 
workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Sync draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/variables-inspect + +#### POST +##### Summary + +Set datasource variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceVariablesPayload](#datasourcevariablespayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/environment-variables + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/rag/pipelines/{pipeline_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunRequiredPayload](#noderunrequiredpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/system-variables + +#### GET +##### Parameters + +| 
Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/publish + +#### GET +##### Summary + +Get published pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/preview + +#### POST +##### Summary + +Run datasource content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/run + +#### POST +##### Summary + +Run published workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [PublishedWorkflowRunPayload](#publishedworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete a published workflow version that is not currently active on the pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id}/restore + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /refresh-token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/{url} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /reset-password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rule-code-generate + +#### POST +##### Description + +Generate code rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleCodeGeneratePayload](#rulecodegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Code rules generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-generate + +#### POST +##### Description + +Generate rule configuration using 
LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleGeneratePayload](#rulegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Rule configuration generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-structured-output-generate + +#### POST +##### Description + +Generate structured output rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleStructuredOutputPayload](#rulestructuredoutputpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Structured output generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /spec/schema-definitions + +#### GET +##### Summary + +Get system JSON Schema definitions specification + +##### Description + +Used for frontend component type mapping + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /system-features + +#### GET +##### Summary + +Get system-wide feature configuration + +##### Description + +Get system-wide feature configuration +NOTE: This endpoint is unauthenticated by design, as it provides system features +data required for dashboard initialization. + +Authentication would create circular dependency (can't login without dashboard loading). + +Only non-sensitive configuration data should be returned by this endpoint. 
+ +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [SystemFeatureResponse](#systemfeatureresponse) | + +### /tag-bindings + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tag-bindings/remove + +#### POST +##### Description + +Remove one or more tag bindings from a target. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingRemovePayload](#tagbindingremovepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword for tag name. | No | string | +| type | query | Tag type filter. Can be "knowledge" or "app". 
| No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ [TagResponse](#tagresponse) ] | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags/{tag_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /test/retrieval + +#### POST +##### Description + +Bedrock retrieval test (internal use only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [BedrockRetrievalPayload](#bedrockretrievalpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Bedrock retrieval test completed | + +### /trial-apps/{app_id} + +#### GET +##### Summary + +Get app detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ChatRequest](#chatrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionRequest](#completionrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/datasets + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/trial-apps/{app_id}/site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Returns the site configuration for the application including theme, icons, and text. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [TextToSpeechRequest](#texttospeechrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows + +#### GET +##### Summary + +Get workflow detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunRequest](#workflowrunrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /website/crawl + +#### POST +##### Description + +Crawl website content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlPayload](#websitecrawlpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Website crawl initiated successfully | +| 400 | Invalid crawl parameters | + +### /website/crawl/status/{job_id} + +#### GET +##### Description + +Get website crawl status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlStatusQuery](#websitecrawlstatusquery) | +| job_id | path | Crawl job ID | Yes | string | +| provider | query | Crawl provider (firecrawl/watercrawl/jinareader) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Crawl status retrieved successfully | +| 400 | Invalid provider | +| 404 | Crawl job not found | + +### /workflow/{workflow_run_id}/events + +#### GET +##### Summary + +Get workflow execution events stream after resume + +##### Description + +GET /console/api/workflow/{workflow_run_id}/events + +Returns Server-Sent Events stream. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workflow/{workflow_run_id}/pause-details + +#### GET +##### Summary + +Get workflow pause details + +##### Description + +GET /console/api/workflow/{workflow_run_id}/pause-details + +Returns information about why and where the workflow is paused. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /workspaces/current/agent-provider/{provider_name} + +#### GET +##### Description + +Get specific agent provider details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_name | path | Agent provider name | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | object | + +### /workspaces/current/agent-providers + +#### GET +##### Description + +Get list of available agent providers + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ object ] | + +### /workspaces/current/dataset-operators + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/default-model + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGetDefault](#parsergetdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[ParserPostDefault](#parserpostdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/endpoints + +#### POST +##### Description + +Create a new plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/create + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a plugin endpoint. Use POST /workspaces/current/endpoints instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/delete + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for deleting a plugin endpoint. Use DELETE /workspaces/current/endpoints/{id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/disable + +#### POST +##### Description + +Disable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint disabled successfully | [EndpointDisableResponse](#endpointdisableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/enable + +#### POST +##### Description + +Enable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint enabled successfully | [EndpointEnableResponse](#endpointenableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/list + +#### GET +##### Description + +List plugin endpoints with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListQuery](#endpointlistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EndpointListResponse](#endpointlistresponse) | + +### 
/workspaces/current/endpoints/list/plugin + +#### GET +##### Description + +List endpoints for a specific plugin + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListForPluginQuery](#endpointlistforpluginquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [PluginEndpointListResponse](#pluginendpointlistresponse) | + +### /workspaces/current/endpoints/update + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating a plugin endpoint. Use PATCH /workspaces/current/endpoints/{id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LegacyEndpointUpdatePayload](#legacyendpointupdatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/{id} + +#### DELETE +##### Description + +Delete a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Endpoint ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +#### PATCH +##### Description + +Update a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointUpdatePayload](#endpointupdatepayload) | +| id | path | Endpoint ID | Yes | string | + +##### Responses + 
+| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/members + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/members/invite-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MemberInvitePayload](#memberinvitepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/owner-transfer-check + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferCheckPayload](#ownertransfercheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/send-owner-transfer-confirm-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferEmailPayload](#ownertransferemailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/owner-transfer + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [OwnerTransferPayload](#ownertransferpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/update-role + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [MemberRoleUpdatePayload](#memberroleupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserModelList](#parsermodellist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/checkout-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialDelete](#parsercredentialdelete) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserCredentialId](#parsercredentialid) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialCreate](#parsercredentialcreate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialUpdate](#parsercredentialupdate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialSwitch](#parsercredentialswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialValidate](#parsercredentialvalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPostModels](#parserpostmodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteCredential](#parserdeletecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserGetCredentials](#parsergetcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCreateCredential](#parsercreatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | 
-------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserUpdateCredential](#parserupdatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserSwitch](#parserswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserValidate](#parservalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/disable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/enable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/credentials-validate + +#### 
POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/{config_id}/credentials-validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| config_id | path | | Yes | string | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/parameter-rules + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserParameter](#parserparameter) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/preferred-provider-type + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPreferredProviderType](#parserpreferredprovidertype) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/permission + +#### GET +##### Summary + +Get workspace permission settings + +##### Description + +Returns permission flags that control workspace features like member invitations and owner transfer. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/asset + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserAsset](#parserasset) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/debugging-key + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/fetch-manifest + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserIcon](#parsericon) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubInstall](#parsergithubinstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/marketplace + +#### POST 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/pkg + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserList](#parserlist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/installations/ids + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/latest-versions + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/marketplace/pkg + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptions](#parserdynamicoptions) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options-with-credentials + +#### POST +##### Summary + +Fetch dynamic options using credentials directly (for edit mode) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptionsWithCredentials](#parserdynamicoptionswithcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPermissionChange](#parserpermissionchange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/autoupgrade/exclude + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserExcludePlugin](#parserexcludeplugin) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPreferencesChange](#parserpreferenceschange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/readme + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserReadme](#parserreadme) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserTasks](#parsertasks) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/delete_all + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete/{identifier} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| identifier | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/uninstall + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserUninstall](#parseruninstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpgrade](#parsergithubupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/marketplace + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserMarketplaceUpgrade](#parsermarketplaceupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/bundle + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpload](#parsergithubupload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/pkg + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-labels + +#### GET +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderAddPayload](#apitoolprovideraddpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderDeletePayload](#apitoolproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/remote + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/schema + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolSchemaPayload](#apitoolschemapayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/test/pre + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolTestPayload](#apitooltestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + 
+### /workspaces/current/tool-provider/api/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderUpdatePayload](#apitoolproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolAddPayload](#builtintooladdpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/schema/{credential_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| credential_type | path | | Yes | string | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credentials + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/default-credential + +#### POST +##### 
Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinProviderDefaultCredentialPayload](#builtinproviderdefaultcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolCredentialDeletePayload](#builtintoolcredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/client-schema + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ToolOAuthCustomClientPayload](#tooloauthcustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/tools + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolUpdatePayload](#builtintoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderDeletePayload](#mcpproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | 
Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderCreatePayload](#mcpprovidercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderUpdatePayload](#mcpproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/auth + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPAuthPayload](#mcpauthpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/tools/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/update/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/create + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolCreatePayload](#workflowtoolcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success 
| + +### /workspaces/current/tool-provider/workflow/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolDeletePayload](#workflowtooldeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolUpdatePayload](#workflowtoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-providers + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/api + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/builtin + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/mcp + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/workflow + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + 
+##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/info + +#### GET +##### Summary + +Get info for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/oauth/client + +#### DELETE +##### Summary + +Remove custom OAuth client configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Summary + +Get OAuth client configuration for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Configure custom OAuth client for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerOAuthClientPayload](#triggeroauthclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/build/{subscription_builder_id} + +#### POST +##### Summary + +Build a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | 
string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/create + +#### POST +##### Summary + +Add a new subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderCreatePayload](#triggersubscriptionbuildercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/logs/{subscription_builder_id} + +#### GET +##### Summary + +Get the request logs for a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/update/{subscription_builder_id} + +#### POST +##### Summary + +Update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/workspaces/current/trigger-provider/{provider}/subscriptions/builder/verify-and-update/{subscription_builder_id} + +#### POST +##### Summary + +Verify and update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/{subscription_builder_id} + +#### GET +##### Summary + +Get a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/list + +#### GET +##### Summary + +List all trigger subscriptions for the current tenant's provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/oauth/authorize + +#### GET +##### Summary + +Initiate OAuth authorization flow for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/verify/{subscription_id} + +#### POST +##### Summary + +Verify credentials for an existing subscription (edit mode only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/delete + +#### POST +##### Summary + +Delete a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/update + +#### POST +##### Summary + +Update a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/triggers + +#### GET +##### Summary + +List all trigger providers for the current tenant + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkspaceCustomConfigPayload](#workspacecustomconfigpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config/webapp-logo/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/info + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceInfoPayload](#workspaceinfopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SwitchWorkspacePayload](#switchworkspacepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/{tenant_id}/model-providers/{provider}/{icon_type}/{lang} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| icon_type | path | | Yes | string | +| lang | path | | Yes | string | +| provider | path | | Yes | string | +| tenant_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +## default +Default namespace + +### /explore/banners + +#### GET +##### Summary + +Get banner list + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### APIBasedExtensionListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| APIBasedExtensionListResponse | array | | | + +#### APIBasedExtensionPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | API endpoint URL | Yes | +| 
api_key | string | API key for authentication | Yes | +| name | string | Extension name | Yes | + +#### APIBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | | Yes | +| api_key | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| name | string | | Yes | + +#### Account + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| interface_language | | | No | +| interface_theme | | | No | +| is_password_set | boolean | | Yes | +| last_login_at | | | No | +| last_login_ip | | | No | +| name | string | | Yes | +| timezone | | | No | + +#### AccountAvatarPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | | Yes | + +#### AccountAvatarQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | Avatar file ID | Yes | + +#### AccountDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### AccountDeletionFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| feedback | string | | Yes | + +#### AccountInitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | +| invitation_code | | | No | +| timezone | string | | Yes | + +#### AccountIntegrateListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AccountIntegrateResponse](#accountintegrateresponse) ] | | Yes | + +#### AccountIntegrateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| is_bound | 
boolean | | Yes | +| link | | | No | +| provider | string | | Yes | + +#### AccountInterfaceLanguagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | + +#### AccountInterfaceThemePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_theme | string | *Enum:* `"dark"`, `"light"` | Yes | + +#### AccountNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### AccountPasswordPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password | | | No | +| repeat_new_password | string | | Yes | + +#### AccountTimezonePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| timezone | string | | Yes | + +#### AccountWithRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| last_active_at | | | No | +| last_login_at | | | No | +| name | string | | Yes | +| role | string | | Yes | +| status | string | | Yes | + +#### AccountWithRoleList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| accounts | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### ActivateCheckQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| interface_language | string | | Yes | +| name | string | | Yes | +| timezone | string | | Yes | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivationCheckResponse + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| data | | Activation data if valid | No | +| is_valid | boolean | Whether token is valid | Yes | + +#### ActivationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### AdvancedChatWorkflowRunForList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | | No | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| elapsed_time | number | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| id | string | | No | +| message_id | string | | No | +| retry_index | integer | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### AdvancedChatWorkflowRunPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AdvancedChatWorkflowRunForList](#advancedchatworkflowrunforlist) ] | | No | +| has_more | boolean | | No | +| limit | integer | | No | + +#### AdvancedChatWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | | | No | +| parent_message_id | | | No | +| query | string | | No | + +#### AdvancedPromptTemplateQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_mode | string | Application mode | Yes | +| has_context | string | Whether has context | No | +| model_mode | string | Model mode | Yes | +| model_name | string | Model name | Yes | + +#### AgentLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| message_id | string | Message UUID | Yes | + +#### AgentThought + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| chain_id | | | No | +| created_at | | | No | +| files | [ string ] | | Yes | +| id | string | | Yes | +| message_chain_id | | | No | +| message_id | string | | Yes | +| observation | | | No | +| position | integer | | Yes | +| thought | | | No | +| tool | | | No | +| tool_input | | | No | +| tool_labels | [JSONValue](#jsonvalue) | | Yes | + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCountResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| count | integer | Number of annotations | Yes | + +#### AnnotationExportList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | + +#### AnnotationFilePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | Message ID | Yes | + +#### AnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_content | | | No | +| annotation_question | | | No | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | +| score | | | No | +| source | | | No | + +#### AnnotationHitHistoryList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AnnotationHitHistory](#annotationhithistory) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + 
+#### AnnotationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | string | Search keyword | No | +| limit | integer | Page size | No | +| page | integer | Page number | No | + +#### AnnotationReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### AnnotationReplyStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | *Enum:* `"disable"`, `"enable"` | Yes | + +#### AnnotationSettingUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Score threshold | Yes | + +#### ApiKeyAuthBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| credentials | object | | Yes | +| provider | string | | Yes | + +#### ApiKeyItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| last_used_at | | | No | +| token | string | | Yes | +| type | string | | Yes | + +#### ApiKeyList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ApiKeyItem](#apikeyitem) ] | | Yes | + +#### ApiProviderSchemaType + +Enum class for api provider schema type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ApiProviderSchemaType | string | Enum class for api provider schema type. 
| | + +#### ApiToolProviderAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | | Yes | + +#### ApiToolProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| original_provider | string | | Yes | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolSchemaPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| schema | string | | Yes | + +#### ApiToolTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| parameters | object | | Yes | +| provider_name | | | No | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | +| tool_name | string | | Yes | + +#### AppApiStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_api | boolean | Enable or disable API | Yes | + +#### AppDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| 
icon_background | | | No | +| id | string | | Yes | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppDetailKernel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| mode | string | | No | +| name | string | | No | + +#### AppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| api_base_url | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| deleted_tools | [ [DeletedTool](#deletedtool) ] | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| site | | | No | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | boolean | Include secrets in export | No | +| workflow_id | | Specific workflow ID to export | No | + +#### AppExportResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | | Yes | + +#### AppIconPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | Icon data | No | +| icon_background | | Icon background color | No | +| 
icon_type | | Icon type | No | + +#### AppImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | Import mode | Yes | +| name | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### AppListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_created_by_me | | Filter by creator | No | +| limit | integer | Page size (1-100) | No | +| mode | string | App mode filter
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"all"`, `"channel"`, `"chat"`, `"completion"`, `"workflow"` | No | +| name | | Filter by app name | No | +| page | integer | Page number (1-99999) | No | +| tag_ids | | Filter by tag IDs | No | + +#### AppMCPServerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | +| parameters | | | Yes | +| server_code | string | | Yes | +| status | [AppMCPServerStatus](#appmcpserverstatus) | | Yes | +| updated_at | | | No | + +#### AppMCPServerStatus + +AppMCPServer Status Enum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| AppMCPServerStatus | string | AppMCPServer Status Enum | | + +#### AppNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Name to check | Yes | + +#### AppPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [AppPartial](#apppartial) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### AppPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| author_name | | | No | +| create_user_name | | | No | +| created_at | | | No | +| created_by | | | No | +| desc_or_prompt | | | No | +| has_draft_trigger | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppSiteResponse + +| Name | Type | Description | Required | 
+| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| code | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | string | | Yes | +| default_language | string | | Yes | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| privacy_policy | | | No | +| prompt_public | boolean | | Yes | +| show_workflow_steps | boolean | | Yes | +| title | string | | Yes | +| use_icon_as_answer_icon | boolean | | Yes | + +#### AppSiteStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_site | boolean | Enable or disable site | Yes | + +#### AppSiteUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| use_icon_as_answer_icon | | | No | + +#### AppTracePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | Enable or disable tracing | Yes | +| tracing_provider | | Tracing provider | No | + +#### AudioTranscriptResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| text | string | Transcribed text from audio | Yes | + +#### BatchAddNotificationAccountsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notification_id | string | | Yes | +| user_email | [ string ] | List of account email addresses | Yes | + +#### BatchImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | 
----------- | -------- | +| upload_file_id | string | | Yes | + +#### BedrockRetrievalPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| knowledge_id | string | | Yes | +| query | string | | Yes | +| retrieval_setting | [BedrockRetrievalSetting](#bedrockretrievalsetting) | | Yes | + +#### BedrockRetrievalSetting + +Retrieval settings for Amazon Bedrock knowledge base queries. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Minimum relevance score threshold | No | +| top_k | | Maximum number of results to retrieve | No | + +#### BuiltinProviderDefaultCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### BuiltinToolAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | +| type | [CredentialType](#credentialtype) | | Yes | + +#### BuiltinToolCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### BuiltinToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### ButtonStyle + +Button styles for user actions. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ButtonStyle | string | Button styles for user actions. 
| | + +#### ChangeEmailResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_email | string | | Yes | +| token | string | | Yes | + +#### ChangeEmailSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | +| phase | | | No | +| token | | | No | + +#### ChangeEmailValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ChatConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| sort_by | string | Sort field and direction
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query | Yes | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### ChatMessagesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### ChatRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | object | | Yes | +| parent_message_id | | | No | +| query | string | | Yes | +| retriever_from | string | | No | + +#### CheckDependenciesResult + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [PluginDependency](#plugindependency) ] | | No | + +#### CheckEmailUniquePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | + +#### ChildChunkBatchUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunks | [ [ChildChunkUpdateArgs](#childchunkupdateargs) ] | | Yes | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | +| id | | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CodeBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | | Extension data | Yes | +| module | string | Module name | Yes | + +#### CompletionConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | 
string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### CompletionMessageExplorePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| query | string | Query text | No | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### CompletionRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### ComplianceDownloadQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_name | string | Compliance document name | Yes | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConsoleDatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ids | [ string ] | Filter by dataset IDs | No | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### Conversation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotation | | | No | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| read_at | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationAnnotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| account | | | No | +| content | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | + +#### ConversationAnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_create_account | | | No | +| created_at | | | No | +| id | string | | Yes | + +#### ConversationDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| introduction | | | No | +| message_count | integer | | Yes | +| model_config | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | + +#### ConversationMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| status | string | | Yes | + +#### ConversationPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [Conversation](#conversation) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | 
string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | Conversation variables for the draft workflow | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID to filter variables | Yes | + +#### ConversationWithSummary + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| message_count | integer | | Yes | +| model_config | | | No | +| name | string | | Yes | +| read_at | | | No | +| status | string | | Yes | +| status_count | | | No | +| summary_or_query | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationWithSummaryPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [ConversationWithSummary](#conversationwithsummary) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConvertToWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background 
| | | No | +| icon_type | | | No | +| name | | | No | + +#### CopyAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Description for the copied app | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| name | | Name for the copied app | No | + +#### CreateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | Annotation reply data | No | +| answer | | Answer text | No | +| content | | Content text | No | +| message_id | | Message ID | No | +| question | | Question text | No | + +#### CreateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| mode | string | App mode
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"chat"`, `"completion"`, `"workflow"` | Yes | +| name | string | App name | Yes | + +#### CredentialType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| CredentialType | string | | | + +#### DataSource + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| info_list | [InfoList](#infolist) | | Yes | + +#### DataSourceIntegrate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| disabled | boolean | | No | +| id | string | | No | +| is_bound | boolean | | No | +| link | string | | No | +| provider | string | | No | +| source_info | [DataSourceIntegrateWorkspace](#datasourceintegrateworkspace) | | No | + +#### DataSourceIntegrateIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | string | | No | +| type | string | | No | +| url | string | | No | + +#### DataSourceIntegrateList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [DataSourceIntegrate](#datasourceintegrate) ] | | No | + +#### DataSourceIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### DataSourceIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [DataSourceIntegratePage](#datasourceintegratepage) ] | | No | +| total | integer | | No | +| workspace_icon | string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### DatasetAndDocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| batch | string | | Yes | +| dataset | 
[DatasetResponse](#datasetresponse) | | Yes | +| documents | [ [DocumentResponse](#documentresponse) ] | | Yes | + +#### DatasetBase + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| id | string | | No | +| indexing_technique | string | | No | +| name | string | | No | +| permission | string | | No | + +#### DatasetContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| content_type | string | | No | +| file_info | [DatasetFileInfo](#datasetfileinfo) | | No | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | + +#### DatasetDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_count | integer | | No | +| author_name | string | | No | +| built_in_field_enabled | boolean | | No | +| chunk_structure | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| doc_form | string | | No | +| doc_metadata | [ [DatasetDocMetadata](#datasetdocmetadata) ] | | No | +| document_count | integer | | No | +| embedding_available | boolean | | No | +| embedding_model | string | | No | +| embedding_model_provider | string | | No | +| enable_api | boolean | | No | +| external_knowledge_info | [ExternalKnowledgeInfo](#externalknowledgeinfo) | | No | +| external_retrieval_model | [ExternalRetrievalModel](#externalretrievalmodel) | | No | +| icon_info | [DatasetIconInfo](#dataseticoninfo) | | No | +| id | string | | No | +| 
indexing_technique | string | | No | +| is_multimodal | boolean | | No | +| is_published | boolean | | No | +| name | string | | No | +| permission | string | | No | +| pipeline_id | string | | No | +| provider | string | | No | +| retrieval_model_dict | [DatasetRetrievalModel](#datasetretrievalmodel) | | No | +| runtime_mode | string | | No | +| summary_index_setting | [_AnonymousInlineModel_b1954337d565](#_anonymousinlinemodel_b1954337d565) | | No | +| tags | [ [Tag](#tag) ] | | No | +| total_available_documents | integer | | No | +| total_documents | integer | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| word_count | integer | | No | + +#### DatasetDocMetadata + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### DatasetFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | string | | No | +| id | string | | No | +| mime_type | string | | No | +| name | string | | No | +| size | integer | | No | +| source_url | string | | No | + +#### DatasetIconInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | string | | No | + +#### DatasetKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetQueryDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| created_by_role | string | | No | +| id | string | | No | +| queries | [DatasetContent](#datasetcontent) | | No | +| source | string 
| | No | +| source_app_id | string | | No | + +#### DatasetRerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | string | | No | +| reranking_provider_name | string | | No | + +#### DatasetResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| data_source_type | | | No | +| description | | | No | +| id | string | | Yes | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | + +#### DatasetRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_enable | boolean | | No | +| reranking_mode | string | | No | +| reranking_model | [DatasetRerankingModel](#datasetrerankingmodel) | | No | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| search_method | string | | No | +| top_k | integer | | No | +| weights | [DatasetWeightedScore](#datasetweightedscore) | | No | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| icon_info | | | No | +| indexing_technique | | | No | +| is_multimodal | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### DatasetVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | No | +| embedding_provider_name | string | | No | +| vector_weight | number | | No | + +#### DatasetWeightedScore + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | 
[DatasetKeywordSetting](#datasetkeywordsetting) | | No | +| vector_setting | [DatasetVectorSetting](#datasetvectorsetting) | | No | +| weight_type | string | | No | + +#### DatasourceCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### DatasourceCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### DatasourceCredentialUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### DatasourceCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### DatasourceDefaultPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | + +#### DatasourceUpdateNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| name | string | | Yes | + +#### DatasourceVariablesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info | object | | Yes | +| datasource_type | string | | Yes | +| start_node_id | string | | Yes | +| start_node_title | string | | Yes | + +#### DebugPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DebugPermission | string | | | + +#### DefaultBlockConfigQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| q | | | No | 
+ +#### DeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | Yes | +| tool_name | string | | Yes | +| type | string | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentMetadataResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | +| value | | | No | + +#### DocumentMetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_metadata | | | No | +| doc_type | | | No | + +#### DocumentRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### DocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| 
summary_index_status | | | No | +| tokens | | | No | +| word_count | | | No | + +#### DocumentRetryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string ] | | Yes | + +#### DocumentWithSegmentsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| completed_segments | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| process_rule_dict | | | No | +| summary_index_status | | | No | +| tokens | | | No | +| total_segments | | | No | +| word_count | | | No | + +#### DraftWorkflowNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | + +#### DraftWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| start_node_id | string | | Yes | + +#### DraftWorkflowSyncPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | | | No | +| environment_variables | | | No | +| features | | | No | +| graph | object | | Yes | +| hash | | | No | +| rag_pipeline_variables | | | No | + +#### DraftWorkflowTriggerRunAllPayload + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| node_ids | [ string ] | | Yes | + +#### DraftWorkflowTriggerRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### DraftWorkflowTriggerRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | Node ID | Yes | + +#### EducationActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| institution | string | | Yes | +| role | string | | Yes | +| token | string | | Yes | + +#### EducationAutocompleteQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keywords | string | | Yes | +| limit | integer | | No | +| page | integer | | No | + +#### EducationAutocompleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| curr_page | | | No | +| data | [ string ] | | No | +| has_next | | | No | + +#### EducationStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_refresh | | | No | +| expire_at | | | No | +| is_student | | | No | +| result | | | No | + +#### EducationVerifyResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | | | No | + +#### EmailCodeLoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| language | | | No | +| token | string | | Yes | + +#### EmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### EmailRegisterResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### 
EmailRegisterSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| language | | Language code | No | + +#### EmailRegisterValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### EndpointCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| settings | object | | Yes | + +#### EndpointCreateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDeleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDisableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointEnableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointIdPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | + +#### EndpointListForPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | +| plugin_id | string | | Yes | + +#### EndpointListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | + +#### EndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### 
EndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### EndpointUpdateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EnvironmentVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| environment_variables | [ object ] | Environment variables for the draft workflow | Yes | + +#### ExecutionContentType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ExecutionContentType | string | | | + +#### ExternalApiTemplateListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | + +#### ExternalDatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| external_knowledge_api_id | string | | Yes | +| external_knowledge_id | string | | Yes | +| external_retrieval_model | | | No | +| name | string | | Yes | + +#### ExternalHitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_retrieval_model | | | No | +| metadata_filtering_conditions | | | No | +| query | string | | Yes | + +#### ExternalKnowledgeApiPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### ExternalKnowledgeInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_knowledge_api_endpoint | string | | No | +| external_knowledge_api_id | string | | No | +| external_knowledge_api_name | string | | No | +| external_knowledge_id | string | | No | + +#### 
ExternalRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| top_k | integer | | No | + +#### FeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Feature configuration object | No | + +#### Feedback + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| from_account | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| rating | string | | Yes | + +#### FeedbackExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end_date | | End date (YYYY-MM-DD) | No | +| format | string | Export format
*Enum:* `"csv"`, `"json"` | No | +| from_source | | Filter by feedback source | No | +| has_comment | | Only include feedback with comments | No | +| rating | | Filter by rating | No | +| start_date | | Start date (YYYY-MM-DD) | No | + +#### FeedbackStat + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dislike | integer | | Yes | +| like | integer | | Yes | + +#### FileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_ids | [ string ] | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordCheckResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| is_valid | boolean | Whether code is valid | Yes | +| token | string | New reset token | Yes | + +#### ForgotPasswordEmailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | | Error code if account not found | No | +| data | | Reset token | No | +| result | string | Operation result | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetResponse + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### FormInput + +Form input definition. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| default | | | No | +| output_variable_name | string | | Yes | +| type | [FormInputType](#forminputtype) | | Yes | + +#### FormInputDefault + +Default configuration for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| selector | [ string ] | | No | +| type | [PlaceholderType](#placeholdertype) | | Yes | +| value | string | | No | + +#### FormInputType + +Form input types. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| FormInputType | string | Form input types. | | + +#### GenerateSummaryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_list | [ string ] | | Yes | + +#### Github + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| github_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### HitTestingChildChunk + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| id | | | No | +| position | | | No | +| score | | | No | + +#### HitTestingDocument + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | | | No | +| doc_metadata | | | No | +| doc_type | | | No | +| id | | | No | +| name | | | No | + +#### HitTestingFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | | | No | +| id | | | No | +| mime_type | | | No | +| name | | | No | 
+| size | | | No | +| source_url | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HitTestingRecord + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| child_chunks | [ [HitTestingChildChunk](#hittestingchildchunk) ] | | No | +| files | [ [HitTestingFile](#hittestingfile) ] | | No | +| score | | | No | +| segment | | | No | +| summary | | | No | +| tsne_position | | | No | + +#### HitTestingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| query | string | | Yes | +| records | [ [HitTestingRecord](#hittestingrecord) ] | | No | + +#### HitTestingSegment + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| completed_at | | | No | +| content | | | No | +| created_at | | | No | +| created_by | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| document | | | No | +| document_id | | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | | | No | +| index_node_hash | | | No | +| index_node_id | | | No | +| indexing_at | | | No | +| keywords | [ string ] | | No | +| position | | | No | +| sign_content | | | No | +| status | | | No | +| stopped_at | | | No | +| tokens | | | No | +| word_count | | | No | + +#### HumanInputContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| form_definition | | | No | +| form_submission_data | | | No | +| submitted | boolean | | Yes | +| type | [ExecutionContentType](#executioncontenttype) | | No | +| workflow_run_id | string | | Yes | + +#### HumanInputDeliveryTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| delivery_method_id | string | Delivery method ID | Yes | 
+| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormDefinition + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| actions | [ [UserAction](#useraction) ] | | No | +| display_in_ui | boolean | | No | +| expiration_time | integer | | Yes | +| form_content | string | | Yes | +| form_id | string | | Yes | +| form_token | | | No | +| inputs | [ [FormInput](#forminput) ] | | No | +| node_id | string | | Yes | +| node_title | string | | Yes | +| resolved_default_values | object | | No | + +#### HumanInputFormPreviewPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormSubmissionData + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action_id | string | | Yes | +| action_text | string | | Yes | +| node_id | string | | Yes | +| node_title | string | | Yes | +| rendered_content | string | | Yes | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | Selected action ID | Yes | +| form_inputs | object | Values the user provides for the form's own fields | Yes | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | Yes | + +#### IconType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| IconType | string | | | + +#### Import + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| app_mode | | | No | +| current_dsl_version | string | | No | +| error | string | | No | +| id | string | | Yes | +| imported_dsl_version | string | | No | +| status | [ImportStatus](#importstatus) | | Yes | + +#### ImportStatus + +| Name | Type | Description | Required | +| 
---- | ---- | ----------- | -------- | +| ImportStatus | string | | | + +#### IncludeSecretQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | string | | No | + +#### IndexingEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dataset_id | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| indexing_technique | string | | Yes | +| info_list | object | | Yes | +| process_rule | object | | Yes | + +#### InfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | string | *Enum:* `"notion_import"`, `"upload_file"`, `"website_crawl"` | Yes | +| file_info_list | | | No | +| notion_info_list | | | No | +| website_info_list | | | No | + +#### Inner + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | | | No | +| model_type | [ModelType](#modeltype) | | Yes | +| provider | | | No | + +#### InsertExploreAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| can_trial | boolean | | No | +| category | string | | Yes | +| copyright | | | No | +| custom_disclaimer | | | No | +| desc | | | No | +| language | string | | Yes | +| position | integer | | Yes | +| privacy_policy | | | No | +| trial_limit | integer | | No | + +#### InsertExploreBannerPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| description | string | | Yes | +| img-src | string | | Yes | +| language | string | | No | +| link | string | | Yes | +| sort | integer | | Yes | +| title | string | | Yes | + +#### InstallPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| InstallPermission | string | | | + +#### InstalledAppCreatePayload + +| Name | Type | Description | Required | +| ---- | 
---- | ----------- | -------- | +| app_id | string | | Yes | + +#### InstalledAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | +| use_icon_as_answer_icon | | | No | + +#### InstalledAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| installed_apps | [ [InstalledAppResponse](#installedappresponse) ] | | Yes | + +#### InstalledAppResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app | [InstalledAppInfoResponse](#installedappinforesponse) | | Yes | +| app_owner_tenant_id | string | | Yes | +| editable | boolean | | Yes | +| id | string | | Yes | +| is_pinned | boolean | | Yes | +| last_used_at | | | No | +| uninstallable | boolean | | Yes | + +#### InstalledAppUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_pinned | | | No | + +#### InstalledAppsListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | App ID to filter by | No | + +#### InstructionGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current | string | Current instruction text | No | +| flow_id | string | Workflow/Flow ID | Yes | +| ideal_output | string | Expected ideal output | No | +| instruction | string | Instruction for generation | Yes | +| language | string | Programming language (javascript/python) | No | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| node_id | string | Node ID for workflow context | No | + +#### InstructionTemplatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | Instruction template type | Yes | + +#### IterationNodeRunPayload + +| Name 
| Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### JSONValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JSONValue | | | | + +#### KnowledgeConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| duplicate | boolean | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | string | *Enum:* `"economy"`, `"high_quality"` | Yes | +| is_multimodal | boolean | | No | +| name | | | No | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### LLMMode + +Enum class for large language model mode. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| LLMMode | string | Enum class for large language model mode. | | + +#### LangContentPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| body | string | | Yes | +| lang | string | Language tag: 'zh' \| 'en' \| 'jp' | Yes | +| subtitle | | | No | +| title | string | | Yes | +| title_pic_url | | | No | + +#### LegacyEndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | +| name | string | | Yes | +| settings | object | | Yes | + +#### LoadBalancingCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### LoadBalancingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| configs | | | No | +| enabled | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | 
+| email | string | | Yes | +| invite_token | | Invitation token | No | +| password | string | | Yes | +| remember_me | boolean | Remember me flag | No | + +#### LoopNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### MCPAuthPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authorization_code | | | No | +| provider_id | string | | Yes | + +#### MCPProviderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | Yes | + +#### MCPProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| provider_id | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPServerCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| parameters | object | Server parameters configuration | Yes | + +#### MCPServerUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| id | string | Server ID | Yes | +| parameters | object | Server parameters configuration | Yes | +| status | | Server status | No | + +#### Marketplace + +| Name | Type 
| Description | Required | +| ---- | ---- | ----------- | -------- | +| marketplace_plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### MemberInvitePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emails | [ string ] | | No | +| language | | | No | +| role | [TenantAccountRole](#tenantaccountrole) | | Yes | + +#### MemberRoleUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| role | string | | Yes | + +#### MessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | Yes | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | integer | | Yes | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| feedbacks | [ [Feedback](#feedback) ] | | Yes | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | [JSONValue](#jsonvalue) | | Yes | +| message_files | [ [MessageFile](#messagefile) ] | | Yes | +| message_metadata_dict | [JSONValue](#jsonvalue) | | Yes | +| message_tokens | integer | | Yes | +| parent_message_id | | | No | +| provider_response_latency | number | | Yes | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageDetailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | No | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | | | No | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| extra_contents | [ [HumanInputContent](#humaninputcontent) ] | | No | +| feedbacks | [ [Feedback](#feedback) ] | | No | +| from_account_id | | | 
No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | | | No | +| message_files | [ [MessageFile](#messagefile) ] | | No | +| message_metadata_dict | | | No | +| message_tokens | | | No | +| parent_message_id | | | No | +| provider_response_latency | | | No | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| message_id | string | Message ID | Yes | +| rating | | | No | + +#### MessageFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| belongs_to | | | No | +| filename | string | | Yes | +| id | string | | Yes | +| mime_type | | | No | +| size | | | No | +| transfer_method | string | | Yes | +| type | string | | Yes | +| upload_file_id | | | No | +| url | | | No | + +#### MessageInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [MessageDetailResponse](#messagedetailresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + 
+Metadata Filtering Condition. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### ModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| completion_params | object | | No | +| mode | [LLMMode](#llmmode) | | Yes | +| name | string | | Yes | +| provider | string | | Yes | + +#### ModelConfigPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| model_dict | | | No | +| pre_prompt | | | No | +| updated_at | | | No | +| updated_by | | | No | + +#### ModelConfigRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | | Agent mode configuration | No | +| configs | | Model configuration parameters | No | +| dataset_configs | | Dataset configurations | No | +| model | | Model name | No | +| more_like_this | | More like this configuration | No | +| opening_statement | | Opening statement | No | +| provider | | Model provider | No | +| retrieval_model | | Retrieval model configuration | No | +| speech_to_text | | Speech to text configuration | No | +| suggested_questions | | Suggested questions | No | +| text_to_speech | | Text to speech configuration | No | +| tools | | Available tools | No | + +#### ModelType + +Enum class for model type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ModelType | string | Enum class for model type. 
| | + +#### MoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | Yes | + +#### NodeIdQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### NodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### NodeRunRequiredPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | | Yes | + +#### NotionEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| notion_info_list | [ object ] | | Yes | +| process_rule | object | | Yes | + +#### NotionIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | | | No | +| type | string | | Yes | +| url | | | No | + +#### NotionInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| pages | [ [NotionPage](#notionpage) ] | | Yes | +| workspace_id | string | | Yes | + +#### NotionIntegrateInfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notion_info | [ [NotionIntegrateWorkspace](#notionintegrateworkspace) ] | | No | + +#### NotionIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_bound | boolean | | No | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### NotionIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [NotionIntegratePage](#notionintegratepage) ] | | No | +| workspace_icon 
| string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### NotionPage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | | | No | +| page_id | string | | Yes | +| page_name | string | | Yes | +| type | string | | Yes | + +#### OAuthDataSourceBindingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OAuthDataSourceResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | Authorization URL or 'internal' for internal setup | Yes | + +#### OAuthDataSourceSyncResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OwnerTransferCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### OwnerTransferEmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | | No | + +#### OwnerTransferPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | string | | Yes | + +#### Package + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### PaginatedConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### Parser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | 
Yes | + +#### ParserAsset + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_name | string | | Yes | +| plugin_unique_identifier | string | | Yes | + +#### ParserCreateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserCredentialCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialDelete + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialId + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | + +#### ParserCredentialSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### ParserDeleteCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDeleteModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDynamicOptions + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | 
| | No | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | +| provider_type | string | *Enum:* `"tool"`, `"trigger"` | Yes | + +#### ParserDynamicOptionsWithCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | + +#### ParserEnable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_trigger | boolean | | Yes | +| trigger_id | string | | Yes | + +#### ParserExcludePlugin + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_id | string | | Yes | + +#### ParserGetCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGetDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGithubInstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### 
ParserIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| filename | string | | Yes | +| tenant_id | string | | Yes | + +#### ParserLatest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_ids | [ string ] | | Yes | + +#### ParserList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserMarketplaceUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | + +#### ParserModelList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | | | No | + +#### ParserParameter + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | + +#### ParserPermissionChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | Yes | +| install_permission | [InstallPermission](#installpermission) | | Yes | + +#### ParserPluginIdentifierQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | + +#### ParserPluginIdentifiers + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifiers | [ string ] | | Yes | + +#### ParserPostDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_settings | [ [Inner](#inner) ] | | Yes | + +#### ParserPostModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| load_balancing | | | No | +| model | string | | Yes | +| model_type | 
[ModelType](#modeltype) | | Yes | + +#### ParserPreferencesChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_upgrade | [PluginAutoUpgradeSettingsPayload](#pluginautoupgradesettingspayload) | | Yes | +| permission | [PluginPermissionSettingsPayload](#pluginpermissionsettingspayload) | | Yes | + +#### ParserPreferredProviderType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| preferred_provider_type | string | *Enum:* `"custom"`, `"system"` | Yes | + +#### ParserReadme + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | | No | +| plugin_unique_identifier | string | | Yes | + +#### ParserSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserTasks + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserUninstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_installation_id | string | | Yes | + +#### ParserUpdateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### PartnerTenantsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| click_id | string | Click Id from partner referral link | Yes | + +#### 
Payload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon_info | | | No | +| name | string | | Yes | + +#### PipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### PlaceholderType + +Default value types for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| PlaceholderType | string | Default value types for form inputs. | | + +#### PluginAutoUpgradeSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| exclude_plugins | [ string ] | | No | +| include_plugins | [ string ] | | No | +| strategy_setting | [StrategySetting](#strategysetting) | | No | +| upgrade_mode | [UpgradeMode](#upgrademode) | | No | +| upgrade_time_of_day | integer | | No | + +#### PluginDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | | | No | +| type | [Type](#type) | | Yes | +| value | | | Yes | + +#### PluginEndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### PluginPermissionSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | No | +| install_permission | 
[InstallPermission](#installpermission) | | No | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### PublishWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### PublishedWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_preview | boolean | | No | +| original_document_id | | | No | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | No | +| start_node_id | string | | Yes | + +#### RagPipelineDatasetImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| yaml_content | string | | Yes | + +#### RagPipelineImport + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_dsl_version | string | | No | +| dataset_id | string | | No | +| error | string | | No | +| id | string | | No | +| imported_dsl_version | string | | No | +| pipeline_id | string | | No | +| status | string | | No | + +#### RagPipelineImportCheckDependencies + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [RagPipelineLeakedDependency](#ragpipelineleakeddependency) ] | | No | + +#### RagPipelineImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | | Yes | +| name | | | No | +| 
pipeline_id | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### RagPipelineLeakedDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | string | | No | +| type | string | | No | +| value | object | | No | + +#### RagPipelineRecommendedPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | | No | + +#### RecommendedAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | + +#### RecommendedAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| categories | [ string ] | | Yes | +| recommended_apps | [ [RecommendedAppResponse](#recommendedappresponse) ] | | Yes | + +#### RecommendedAppResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app | | | No | +| app_id | string | | Yes | +| can_trial | | | No | +| categories | [ string ] | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| description | | | No | +| is_listed | | | No | +| position | | | No | +| privacy_policy | | | No | + +#### RecommendedAppsQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | Language code for recommended app localization | No | + +#### RelatedAppList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AppDetailKernel](#appdetailkernel) ] | | No | +| total | integer | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### ResultResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| 
result | string | | Yes | + +#### RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | + +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### RuleCodeGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code_language | string | Programming language for code generation | No | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleStructuredOutputPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Structured output generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### 
SavedMessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | string | | No | +| hit_count_gte | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | [ string ] | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### SimpleMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | | Yes | +| inputs | object | | Yes | +| message | string | | Yes | +| query | string | | Yes | + +#### SimpleModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_dict | | | No | +| pre_prompt | | | No | + +#### Site + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| app_base_url | | | No | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| code | | | No | +| copyright | | | No | +| created_at | | | No | +| created_by | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | + +#### StatisticTimeRangeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### StatusCount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | Yes | +| partial_success | integer | | Yes | +| paused | integer | | Yes | +| success | integer | | Yes | + +#### StrategySetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| StrategySetting | string | | | + +#### SubscriptionQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interval | string | Billing interval
*Enum:* `"month"`, `"year"` | Yes | +| plan | string | Subscription plan
*Enum:* `"professional"`, `"team"` | Yes | + +#### SuggestedQuestionsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ string ] | Suggested question | Yes | + +#### SwitchWorkspacePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tenant_id | string | | Yes | + +#### SyncDraftWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | | No | +| environment_variables | [ object ] | | No | +| features | object | | Yes | +| graph | object | | Yes | +| hash | | | No | + +#### SyncDraftWorkflowResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| hash | string | | No | +| result | string | | No | +| updated_at | string | | No | + +#### SystemFeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | System feature configuration object | No | + +#### Tag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### TagBasePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Tag name | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to bind | Yes | +| target_id | string | Target ID to bind tags to | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingRemovePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to remove | Yes | +| target_id | string | Target ID to unbind tag from | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagListQueryParam + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| type | string | Tag type filter
*Enum:* `""`, `"app"`, `"knowledge"` | No | + +#### TagResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | | | No | + +#### TagType + +Tag type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TagType | string | Tag type | | + +#### TenantAccountRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TenantAccountRole | string | | | + +#### TenantInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| custom_config | | | No | +| id | string | | Yes | +| in_trial | | | No | +| name | | | No | +| next_credit_reset_date | | | No | +| plan | | | No | +| role | | | No | +| status | | | No | +| trial_credits | | | No | +| trial_credits_used | | | No | +| trial_end_reason | | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### TextToSpeechPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Whether to stream audio | No | +| text | string | Text to convert | Yes | +| voice | | Voice name | No | + +#### TextToSpeechRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | | No | +| streaming | | | No | +| text | | | No | +| voice | | | No | + +#### TextToSpeechVoiceQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | Language code | Yes | + +#### ToolOAuthCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### ToolParameterForm + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ToolParameterForm | string | | | + +#### TraceConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_config | object | Tracing configuration data | Yes | +| tracing_provider | string | Tracing provider name | Yes | + +#### TraceProviderQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_provider | string | Tracing provider name | Yes | + +#### TrialAppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | string | | No | +| api_base_url | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| deleted_tools | [ [TrialDeletedTool](#trialdeletedtool) ] | | No | +| description | string | | No | +| enable_api | boolean | | No | +| enable_site | boolean | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| max_active_requests | integer | | No | +| mode | string | | No | +| model_config | [TrialAppModelConfig](#trialappmodelconfig) | | No | +| name | string | | No | +| site | [TrialSite](#trialsite) | | No | +| tags | [ [TrialTag](#trialtag) ] | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | +| workflow | [TrialWorkflowPartial](#trialworkflowpartial) | | No | + +#### TrialAppModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | object | | No | +| annotation_reply | object | | No | +| chat_prompt_config | object | | No | +| completion_prompt_config | object | | No | +| created_at | object | | No | +| created_by | string | | No | +| dataset_configs | object 
| | No | +| dataset_query_variable | string | | No | +| external_data_tools | object | | No | +| file_upload | object | | No | +| model | object | | No | +| more_like_this | object | | No | +| opening_statement | string | | No | +| pre_prompt | string | | No | +| prompt_type | string | | No | +| retriever_resource | object | | No | +| sensitive_word_avoidance | object | | No | +| speech_to_text | object | | No | +| suggested_questions | object | | No | +| suggested_questions_after_answer | object | | No | +| text_to_speech | object | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| user_input_form | object | | No | + +#### TrialConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### TrialDeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | No | +| tool_name | string | | No | +| type | string | | No | + +#### TrialPipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### TrialSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_token | string | | No | +| app_base_url | string | | No | +| chat_color_theme | string | | No | +| chat_color_theme_inverted | boolean | | No | +| code | string | | No | +| 
copyright | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| custom_disclaimer | string | | No | +| customize_domain | string | | No | +| customize_token_strategy | string | | No | +| default_language | string | | No | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| privacy_policy | string | | No | +| prompt_public | boolean | | No | +| show_workflow_steps | boolean | | No | +| title | string | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | + +#### TrialTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### TrialWorkflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [TrialConversationVariable](#trialconversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [SimpleAccount](#simpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [TrialPipelineVariable](#trialpipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [SimpleAccount](#simpleaccount) | | No | +| version | string | | No | + +#### TrialWorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| id | string | | No | +| updated_at | object | | No | +| updated_by | string | | No | + +#### TriggerOAuthClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| 
client_params | | | No | +| enabled | | | No | + +#### TriggerSubscriptionBuilderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_type | string | | No | + +#### TriggerSubscriptionBuilderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | | | No | +| name | | | No | +| parameters | | | No | +| properties | | | No | + +#### TriggerSubscriptionBuilderVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### Type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| Type | string | | | + +#### UpdateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | | No | +| answer | | | No | +| content | | | No | +| question | | | No | + +#### UpdateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| max_active_requests | | Maximum active requests | No | +| name | string | App name | Yes | +| use_icon_as_answer_icon | | Use icon as answer icon | No | + +#### UpgradeMode + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| UpgradeMode | string | | | + +#### UploadConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_image_file_size_limit | | | No | +| audio_file_size_limit | integer | | Yes | +| batch_count_limit | integer | | Yes | +| file_size_limit | integer | | Yes | +| file_upload_limit | | | No | +| image_file_batch_limit | integer | | Yes | +| image_file_size_limit | integer | | Yes | +| single_chunk_attachment_limit | integer | | Yes | +| 
video_file_size_limit | integer | | Yes | +| workflow_file_upload_limit | integer | | Yes | + +#### UpsertNotificationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| contents | [ [LangContentPayload](#langcontentpayload) ] | | Yes | +| end_time | | RFC3339, e.g. 2026-03-20T23:59:59Z | No | +| frequency | string | 'once' \| 'every_page_load' | No | +| notification_id | | Omit to create; supply UUID to update | No | +| start_time | | RFC3339, e.g. 2026-03-01T00:00:00Z | No | +| status | string | 'active' \| 'inactive' | No | + +#### UserAction + +User action configuration. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| button_style | [ButtonStyle](#buttonstyle) | | No | +| id | string | | Yes | +| title | string | | Yes | + +#### WebhookTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| node_id | string | | Yes | +| webhook_debug_url | string | | Yes | +| webhook_id | string | | Yes | +| webhook_url | string | | Yes | + +#### WebsiteCrawlPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| options | object | | Yes | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | +| url | string | | Yes | + +#### WebsiteCrawlStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | + +#### WebsiteInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| job_id | string | | Yes | +| only_main_content | boolean | | No | +| provider | string | | Yes | +| urls | [ string ] | | Yes | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| 
Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### Workflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [ConversationVariable](#conversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [SimpleAccount](#simpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [PipelineVariable](#pipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [SimpleAccount](#simpleaccount) | | No | +| version | string | | No | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | +| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | | No | + +#### WorkflowAppLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | Filter logs created after this 
timestamp | No | +| created_at__before | | Filter logs created before this timestamp | No | +| created_by_account | | Filter by account | No | +| created_by_end_user_session_id | | Filter by end user session ID | No | +| detail | boolean | Whether to return detailed logs | No | +| keyword | | Search keyword for filtering logs | No | +| limit | integer | Number of items per page (1-100) | No | +| page | integer | Page number (1-99999) | No | +| status | | Execution status filter (succeeded, failed, stopped, partial-succeeded) | No | + +#### WorkflowArchivedLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowArchivedLogPartialResponse](#workflowarchivedlogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowArchivedLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| id | string | | Yes | +| trigger_metadata | | | No | +| workflow_run | | | No | + +#### WorkflowCommentBasic + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mention_count | integer | | No | +| participants | [ [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| reply_count | integer | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | 
No | + +#### WorkflowCommentCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | +| position_x | number | Comment X position | Yes | +| position_y | number | Comment Y position | Yes | + +#### WorkflowCommentDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mentions | [ [_AnonymousInlineModel_f7ff64cce858](#_anonymousinlinemodel_f7ff64cce858) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| replies | [ [_AnonymousInlineModel_55c39c6a4b9e](#_anonymousinlinemodel_55c39c6a4b9e) ] | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | No | + +#### WorkflowCommentMentionUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| users | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### WorkflowCommentReplyCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Reply content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | + +#### WorkflowCommentReplyUpdate + +| Name | 
Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentResolve + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | + +#### WorkflowCommentUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | | Mentioned user IDs. Omit to keep existing mentions. | No | +| position_x | | Comment X position | No | +| position_y | | Comment Y position | No | + +#### WorkflowDraftEnvVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftEnvVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftEnvVariable](#workflowdraftenvvariable) ] | | No | + +#### WorkflowDraftVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| full_content | object | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value | object | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- 
| +| items | [ [WorkflowDraftVariable](#workflowdraftvariable) ] | | No | + +#### WorkflowDraftVariableListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Items per page | No | +| page | integer | Page number | No | + +#### WorkflowDraftVariableListWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftVariableWithoutValue](#workflowdraftvariablewithoutvalue) ] | | No | +| total | object | | No | + +#### WorkflowDraftVariablePatchPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | | No | +| value | | | No | + +#### WorkflowDraftVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | Variable name | No | +| value | | Variable value | No | + +#### WorkflowDraftVariableWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowExecutionStatus + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| WorkflowExecutionStatus | string | | | + +#### WorkflowFeaturesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Workflow feature configuration | Yes | + +#### WorkflowListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| named_only | boolean | | No | +| page | integer | | No | +| user_id | | | No | + +#### WorkflowOnlineUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_ids | [ 
string ] | App IDs | No | + +#### WorkflowPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_more | boolean | | No | +| items | [ [Workflow](#workflow) ] | | No | +| limit | integer | | No | +| page | integer | | No | + +#### WorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| id | string | | Yes | +| updated_at | | | No | +| updated_by | | | No | + +#### WorkflowRunCount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | No | +| partial_succeeded | integer | | No | +| running | integer | | No | +| stopped | integer | | No | +| succeeded | integer | | No | +| total | integer | | No | + +#### WorkflowRunCountQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | | Workflow run status filter | No | +| time_range | | Time range filter (e.g., 7d, 4h, 30m, 30s) | No | +| triggered_from | | Filter by trigger source: debugging or app-run | No | + +#### WorkflowRunDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| created_by_end_user | [SimpleEndUser](#simpleenduser) | | No | +| created_by_role | string | | No | +| elapsed_time | number | | No | +| error | string | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| graph | object | | No | +| id | string | | No | +| inputs | object | | No | +| outputs | object | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### WorkflowRunExport + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| presigned_url | string | Pre-signed URL for download | No | +| presigned_url_expires_at | 
string | Pre-signed URL expiration time | No | +| status | string | Export status: success/failed | No | + +#### WorkflowRunForArchivedLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| elapsed_time | | | No | +| id | string | | Yes | +| status | | | No | +| total_tokens | | | No | +| triggered_from | | | No | + +#### WorkflowRunForList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| elapsed_time | number | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| id | string | | No | +| retry_index | integer | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last run ID for pagination | No | +| limit | integer | Number of items per page (1-100) | No | +| status | | Workflow run status filter | No | +| triggered_from | | Filter by trigger source: debugging or app-run | No | + +#### WorkflowRunNodeExecution + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| created_by_end_user | [SimpleEndUser](#simpleenduser) | | No | +| created_by_role | string | | No | +| elapsed_time | number | | No | +| error | string | | No | +| 
execution_metadata | object | | No | +| extras | object | | No | +| finished_at | object | | No | +| id | string | | No | +| index | integer | | No | +| inputs | object | | No | +| inputs_truncated | boolean | | No | +| node_id | string | | No | +| node_type | string | | No | +| outputs | object | | No | +| outputs_truncated | boolean | | No | +| predecessor_node_id | string | | No | +| process_data | object | | No | +| process_data_truncated | boolean | | No | +| status | string | | No | +| title | string | | No | + +#### WorkflowRunNodeExecutionList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunNodeExecution](#workflowrunnodeexecution) ] | | No | + +#### WorkflowRunPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunForList](#workflowrunforlist) ] | | No | +| has_more | boolean | | No | +| limit | integer | | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### WorkflowRunQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### WorkflowRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### WorkflowStatisticQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date and time (YYYY-MM-DD HH:MM) | No | +| start | | Start date and time (YYYY-MM-DD HH:MM) | No | + +#### WorkflowToolCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ 
[WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_app_id | string | | Yes | + +#### WorkflowToolDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| workflow_tool_id | string | | Yes | + +#### WorkflowToolParameterConfiguration + +Workflow tool configuration + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | The description of the parameter | Yes | +| form | [ToolParameterForm](#toolparameterform) | The form of the parameter | Yes | +| name | string | The name of the parameter | Yes | + +#### WorkflowToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ [WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_tool_id | string | | Yes | + +#### WorkflowTriggerListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowTriggerResponse](#workflowtriggerresponse) ] | | Yes | + +#### WorkflowTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| icon | string | | Yes | +| id | string | | Yes | +| node_id | string | | Yes | +| provider_name | string | | Yes | +| status | string | | Yes | +| title | string | | Yes | +| trigger_type | string | | Yes | +| updated_at | | | No | + +#### WorkflowUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### WorkspaceCustomConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| remove_webapp_brand | 
| | No | +| replace_webapp_logo | | | No | + +#### WorkspaceInfoPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### WorkspaceListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| page | integer | | No | + +#### _AnonymousInlineModel_55c39c6a4b9e + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | + +#### _AnonymousInlineModel_6fec07cd0d85 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar_url | object | | No | +| email | string | | No | +| id | string | | No | +| name | string | | No | + +#### _AnonymousInlineModel_b1954337d565 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable | boolean | | No | +| model_name | string | | No | +| model_provider_name | string | | No | +| summary_prompt | string | | No | + +#### _AnonymousInlineModel_f7ff64cce858 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mentioned_user_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| mentioned_user_id | string | | No | +| reply_id | string | | No | + +## FastOpenAPI Preview (OpenAPI 3.0) + +### Dify API (FastOpenAPI PoC) +FastOpenAPI proof of concept for Dify API + +#### Version: 1.0 + +--- + +##### [GET] /console/api/init +**Get initialization validation status.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [InitStatusResponse](#initstatusresponse)
| + +##### [POST] /console/api/init +**Validate initialization password.** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [InitValidatePayload](#initvalidatepayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [InitValidateResponse](#initvalidateresponse)
| + +##### [GET] /console/api/ping +**Health check endpoint for connection testing.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [PingResponse](#pingresponse)
| + +##### [GET] /console/api/setup +**Get system setup status. + + NOTE: This endpoint is unauthenticated by design. + + During first-time bootstrap there is no admin account yet, so frontend initialization must be + able to query setup progress before any login flow exists. + + Only bootstrap-safe status information should be returned by this endpoint. + ** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [SetupStatusResponse](#setupstatusresponse)
| + +##### [POST] /console/api/setup +**Initialize system setup with admin account. + + NOTE: This endpoint is unauthenticated by design for first-time bootstrap. + Access is restricted by deployment mode (`SELF_HOSTED`), one-time setup guards, + and init-password validation rather than user session authentication. + ** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [SetupRequestPayload](#setuprequestpayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [SetupResponse](#setupresponse)
| + +##### [GET] /console/api/version +**Check for application version updates.** + +###### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| current_version | query | | Yes | string | + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [VersionResponse](#versionresponse)
| + +--- +##### Schemas + +###### ErrorSchema + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| error | { **"details"**: string, **"message"**: string, **"status"**: integer, **"type"**: string } | | Yes | + +###### InitStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | string,
**Available values:** "finished", "not_started" | Initialization status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### InitValidatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| password | string | Initialization password | Yes | + +###### InitValidateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +###### PingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Health check result | Yes | + +###### SetupRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Admin email address | Yes | +| language | | Admin language | No | +| name | string | Admin name (max 30 characters) | Yes | +| password | string | Admin password | Yes | + +###### SetupResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Setup result | Yes | + +###### SetupStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| setup_at | | Setup completion time (ISO format) | No | +| step | string,
**Available values:** "finished", "not_started" | Setup step status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### VersionFeatures + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_replace_logo | boolean | Whether logo replacement is supported | Yes | +| model_load_balancing_enabled | boolean | Whether model load balancing is enabled | Yes | + +###### VersionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_auto_update | boolean | Whether auto-update is supported | Yes | +| features | [VersionFeatures](#versionfeatures) | Feature flags and capabilities | Yes | +| release_date | string | Release date of latest version | Yes | +| release_notes | string | Release notes for latest version | Yes | +| version | string | Latest version number | Yes | diff --git a/api/openapi/markdown/service-swagger.md b/api/openapi/markdown/service-swagger.md new file mode 100644 index 0000000000..ec5ed280f5 --- /dev/null +++ b/api/openapi/markdown/service-swagger.md @@ -0,0 +1,2754 @@ +# Service API +API for application services + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## service_api +Service operations + +### / + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/feedbacks + +#### GET +##### Summary + +Get all feedbacks for the application + +##### Description + +Get all feedbacks for the application +Returns paginated list of all feedback submitted for messages in this app. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackListQuery](#feedbacklistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedbacks retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action} + +#### POST +##### Summary + +Enable or disable annotation reply feature + +##### Description + +Enable or disable annotation reply feature + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyActionPayload](#annotationreplyactionpayload) | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action}/status/{job_id} + +#### GET +##### Summary + +Get the status of an annotation reply action job + +##### Description + +Get the status of an annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Job not found | + +### /apps/annotations + +#### GET +##### Summary + +List annotations for the application + +##### Description + +List annotations for the application + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations retrieved successfully | [AnnotationList](#annotationlist) | +| 401 | Unauthorized - invalid API token | | + +#### POST 
+##### Summary + +Create a new annotation + +##### Description + +Create a new annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | + +### /apps/annotations/{annotation_id} + +#### DELETE +##### Summary + +Delete an annotation + +##### Description + +Delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Annotation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Annotation not found | + +#### PUT +##### Summary + +Update an existing annotation + +##### Description + +Update an existing annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | +| 403 | Forbidden - insufficient permissions | | +| 404 | Annotation not found | | + +### /audio-to-text + +#### POST +##### Summary + +Convert audio to text using speech-to-text + +##### Description + +Convert audio to text using speech-to-text +Accepts an audio file upload and returns the transcribed text. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Audio successfully transcribed | +| 400 | Bad request - no audio or invalid audio | +| 401 | Unauthorized - invalid API token | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal server error | + +### /chat-messages + +#### POST +##### Summary + +Send a message in a chat conversation + +##### Description + +Send a message in a chat conversation +This endpoint handles chat messages for chat, agent chat, and advanced chat applications. +Supports conversation management and both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatRequestPayload](#chatrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message sent successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running chat message generation + +##### Description + +Stop a running chat message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /completion-messages + +#### POST +##### Summary + +Create a completion for the given prompt + +##### Description + +Create a completion for the given prompt +This endpoint generates a completion based on the provided inputs and query. 
+Supports both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionRequestPayload](#completionrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | +| 500 | Internal server error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running completion task + +##### Description + +Stop a running completion task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /conversations + +#### GET +##### Summary + +List all conversations for the current user + +##### Description + +List all conversations for the current user +Supports pagination using last_id and limit parameters. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversations retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Last conversation not found | + +### /conversations/{c_id} + +#### DELETE +##### Summary + +Delete a specific conversation + +##### Description + +Delete a specific conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/name + +#### POST +##### Summary + +Rename a conversation or auto-generate a name + +##### Description + +Rename a conversation or auto-generate a name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/variables + +#### GET +##### Summary + +List all variables for a conversation + +##### Description + +List all variables for a conversation +Conversational variables are only available for chat applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variables retrieved successfully | [ConversationVariableInfiniteScrollPaginationResponse](#conversationvariableinfinitescrollpaginationresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation not found | | + +### /conversations/{c_id}/variables/{variable_id} + +#### PUT +##### Summary + +Update a conversation variable's value + +##### Description + +Update a conversation variable's value +Allows updating the value of a specific conversation variable. +The value must match the variable's expected type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| c_id | path | Conversation ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | [ConversationVariableResponse](#conversationvariableresponse) | +| 400 | Bad request - type mismatch | | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation or variable not found | | + +### /datasets + +#### GET +##### Summary + +Resource for getting datasets + +##### Description + +List all datasets + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | +| 401 | Unauthorized - invalid API token | + +#### POST +##### Summary + +Resource for creating datasets + +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/pipeline/file-upload + +#### POST +##### Summary + +Upload a file for use in conversations + +##### Description + +Upload a file to a knowledgebase pipeline +Accepts a single file upload via multipart/form-data. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | File uploaded successfully | +| 400 | Bad request - no file or invalid file | +| 401 | Unauthorized - invalid API token | +| 413 | File too large | +| 415 | Unsupported file type | + +### /datasets/tags + +#### DELETE +##### Summary + +Delete a knowledge type tag + +##### Description + +Delete a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagDeletePayload](#tagdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tag deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +#### GET +##### Summary + +Get all knowledge type tags + +##### Description + +Get all knowledge type tags + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tags retrieved successfully | +| 401 | Unauthorized - invalid API token | + +#### PATCH +##### Description + +Update a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagUpdatePayload](#tagupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tag 
updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +#### POST +##### Summary + +Add a knowledge type tag + +##### Description + +Add a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagCreatePayload](#tagcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tag created successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/binding + +#### POST +##### Description + +Bind tags to a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags bound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/unbinding + +#### POST +##### Description + +Unbind tags from a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagUnbindingPayload](#tagunbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags unbound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/{dataset_id} + +#### DELETE +##### Summary + +Deletes a dataset given its ID + +##### Description + +Delete a dataset +Args: + _: ignore + dataset_id (UUID): The ID of the dataset to be deleted. + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + if the dataset was successfully deleted. Omitted in HTTP response. 
+ int: HTTP status code 204 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Dataset deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | +| 409 | Conflict - dataset is in use | + +#### GET +##### Description + +Get a specific dataset by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +#### PATCH +##### Description + +Update an existing dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/document/create-by-file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document 
created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create-by-text + +#### POST +##### Description + +Create a new document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a new document by providing text content. Use /datasets/{dataset_id}/document/create-by-text instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/documents + +#### GET +##### Description + +List all documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Description + +Download selected uploaded documents as a single ZIP archive + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | ZIP archive generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Summary + +Update metadata for multiple documents + +##### Description + +Update metadata for multiple documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[MetadataOperationData](#metadataoperationdata) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/status/{action} + +#### PATCH +##### Summary + +Batch update document status + +##### Description + +Batch update document status +Args: + tenant_id: tenant id + dataset_id: dataset id + action: action to perform (Literal["enable", "disable", "archive", "un_archive"]) + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + int: HTTP status code 200 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + Forbidden: If the user does not have permission. + InvalidActionError: If the action is invalid or cannot be performed. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable', 'disable', 'archive', or 'un_archive' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document status updated successfully | +| 400 | Bad request - invalid action | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/{batch}/indexing-status + +#### GET +##### Description + +Get indexing status for documents in a batch + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | Batch ID | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved 
successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or documents not found | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Summary + +Delete document + +##### Description + +Delete a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Document deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - document is archived | +| 404 | Document not found | + +#### GET +##### Description + +Get a specific document by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document not found | + +#### PATCH +##### Description + +Update an existing document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Download URL generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or upload file not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### GET +##### Description + +List segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentListQuery](#segmentlistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +#### POST +##### Description + +Create segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments created successfully | +| 400 | Bad request - segments data is missing | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Description + +Delete a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | 
+| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Segment deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### GET +##### Description + +Get a specific segment by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Update a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to update | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Description + +List child chunks for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkListQuery](#childchunklistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | 
Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunks retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Create a new child chunk for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Description + +Delete a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | Child chunk ID to delete | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Child chunk deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +#### PATCH +##### Description + +Update a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | +| child_chunk_id | path | Child chunk ID to update | Yes | string 
| +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-text + +#### POST +##### Description + +Update an existing document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by providing text content. Use /datasets/{dataset_id}/documents/{document_id}/update-by-text instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Summary + +Get all metadata for a dataset + +##### Description + +Get all metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +#### POST +##### Summary + +Create metadata for a dataset + +##### Description + +Create metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Metadata created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/built-in + +#### GET +##### Summary + +Get all built-in metadata fields + +##### Description + +Get all built-in metadata fields + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Built-in fields retrieved successfully | +| 401 | Unauthorized - invalid 
API token | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Summary + +Enable or disable built-in metadata field + +##### Description + +Enable or disable built-in metadata field + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Summary + +Delete metadata + +##### Description + +Delete metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Metadata deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +#### PATCH +##### Summary + +Update metadata name + +##### Description + +Update metadata name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +### /datasets/{dataset_id}/pipeline/datasource-plugins + +#### GET +##### Summary + +Resource for getting datasource plugins + +##### Description + +List 
all datasource plugins for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| is_published | query | Whether to get published or draft datasource plugins (true for published, false for draft, default: true) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource plugins retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Resource for running a datasource node + +##### Description + +Run a datasource node for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource node run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/run + +#### POST +##### Summary + +Resource for running a rag pipeline + +##### Description + +Run a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Pipeline run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/retrieve + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/tags + +#### GET +##### Summary + +Get tags bound to a dataset + +##### Description + +Get tags bound to a specific dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tags retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /end-users/{end_user_id} + +#### GET +##### Summary + +Get end user detail + +##### Description + +Get an end user by ID +This endpoint is scoped to the current app token's tenant/app to prevent +cross-tenant/app access when an end-user ID is known. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| end_user_id | path | End user ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | End user retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | End user not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in conversations + +##### Description + +Upload a file for use in conversations +Accepts a single file upload via multipart/form-data. 
+ +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - no file or invalid file | | +| 401 | Unauthorized - invalid API token | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /files/{file_id}/preview + +#### GET +##### Summary + +Preview/Download a file that was uploaded via Service API + +##### Description + +Preview or download a file uploaded via Service API +Provides secure file preview/download functionality. +Files can only be accessed if they belong to messages within the requesting app's context. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FilePreviewQuery](#filepreviewquery) | +| file_id | path | UUID of the file to preview | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | File retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - file access denied | +| 404 | File not found | + +### /form/human_input/{form_token} + +#### GET +##### Description + +Get a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +#### POST +##### Description + +Submit a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| form_token | 
path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form submitted successfully | +| 400 | Bad request - invalid submission data | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +### /info + +#### GET +##### Summary + +Get app information + +##### Description + +Get basic application information +Returns basic information about the application including name, description, tags, and mode. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application info retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /messages + +#### GET +##### Summary + +List messages in a conversation + +##### Description + +List messages in a conversation +Retrieves messages with pagination support using first_id. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Messages retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or first message not found | + +### /messages/{message_id}/feedbacks + +#### POST +##### Summary + +Submit feedback for a message + +##### Description + +Submit feedback for a message +Allows users to rate messages as like/dislike and provide optional feedback content. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | + +### /messages/{message_id}/suggested + +#### GET +##### Summary + +Get suggested follow-up questions for a message + +##### Description + +Get suggested follow-up questions for a message +Returns AI-generated follow-up questions based on the message content. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Suggested questions retrieved successfully | +| 400 | Suggested questions feature is disabled | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | +| 500 | Internal server error | + +### /meta + +#### GET +##### Summary + +Get app metadata + +##### Description + +Get application metadata +Returns metadata about the application including configuration and settings. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve application input parameters and configuration +Returns the input form parameters and configuration for the application. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Parameters retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Get application site configuration +Returns the site configuration for the application including theme, icons, and text. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Site configuration retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - site not found or tenant archived | + +### /text-to-audio + +#### POST +##### Summary + +Convert text to audio using text-to-speech + +##### Description + +Convert text to audio using text-to-speech +Converts the provided text to audio using the specified voice. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text successfully converted to audio | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 500 | Internal server error | + +### /workflow/{task_id}/events + +#### GET +##### Description + +Get workflow execution events stream after resume + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Workflow run ID | Yes | string | +| continue_on_pause | query | Whether to keep the stream open across workflow_paused events, specify `"true"` to keep the stream open for `workflow_paused` events. 
| No | string | +| include_state_snapshot | query | Whether to replay from persisted state snapshot, specify `"true"` to include a status snapshot of executed nodes | No | string | +| user | query | End user identifier (query param) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | SSE event stream | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow run not found | + +### /workflows/logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow execution logs +Returns paginated workflow execution logs with filtering options. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowLogQuery](#workflowlogquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | +| 401 | Unauthorized - invalid API token | | + +### /workflows/run + +#### POST +##### Summary + +Execute a workflow + +##### Description + +Execute a workflow +Runs a workflow with the provided inputs and returns the results. +Supports both blocking and streaming response modes. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workflows/run/{workflow_run_id} + +#### GET +##### Summary + +Get a workflow task running detail + +##### Description + +Get workflow run details +Returns detailed information about a specific workflow run. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run details retrieved successfully | [WorkflowRunResponse](#workflowrunresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Workflow run not found | | + +### /workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop a running workflow task + +##### Description + +Stop a running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /workflows/{workflow_id}/run + +#### POST +##### Summary + +Run specific workflow by ID + +##### Description + +Execute a specific workflow by ID +Executes a specific workflow version identified by its ID. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | +| workflow_id | path | Workflow ID to execute | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Summary + +Get available models by model type + +##### Description + +Get available models by model type +Returns a list of available models for the specified model type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | Type of model to retrieve | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Models retrieved successfully | +| 401 | Unauthorized - invalid API token | + +--- +### Models + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | Annotation answer | Yes | +| question | string | Annotation question | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationReplyActionPayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### ChatRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate_name | boolean | Auto generate conversation name | No | +| conversation_id | | Conversation UUID | No | +| files | | | No | +| inputs | object | | Yes | +| query | string | | Yes | +| response_mode | | | No | +| retriever_from | string | | No | +| workflow_id | | Workflow ID for advanced chat | No | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CompletionRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last 
conversation ID for pagination | No | +| limit | integer | Number of conversations to return | No | +| sort_by | string | Sort order for conversations
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariableInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| value | | | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last variable ID for pagination | No | +| limit | integer | Number of variables to return | No | +| variable_name | | Filter variables by name | No | + +#### DataSetTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | +| retrieval_model | | | No | +| summary_index_setting 
| | | No | + +#### DatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| indexing_technique | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| status | | Document status filter | No | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentTextCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | string | | Yes | + +#### DocumentTextUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| name | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | | | No | + +#### FeedbackListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Number of feedbacks per page | No | +| page | integer | Page number | No | + +#### FilePreviewQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| as_attachment | boolean | Download as attachment | No | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | 
string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| inputs | object | | Yes | + +#### JsonValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JsonValue | | | | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + +Metadata Filtering Condition. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### PipelineRunApiEntity + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | +| response_mode | string | | Yes | +| start_node_id | string | | Yes | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | 
+ +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segments | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| status | [ string ] | | No | + +#### SegmentUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | | | No | +| enabled | | | No | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segment | [SegmentUpdateArgs](#segmentupdateargs) | | Yes | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | | Yes | +| target_id | string | | Yes | + +#### TagCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### TagDeletePayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| tag_id | string | | Yes | + +#### TagUnbindingPayload + +Accept the legacy single-tag Service API payload while exposing a normalized tag_ids list internally. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_id | | | No | +| tag_ids | [ string ] | | No | +| target_id | string | | Yes | + +#### TagUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| tag_id | string | | Yes | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | 
+| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | | No | + +#### WorkflowLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | | No | +| created_at__before | | | No | +| created_by_account | | | No | +| created_by_end_user_session_id | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| response_mode | | | No | + +#### WorkflowRunResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| finished_at | | | No | +| id | string | | Yes | +| inputs | | | No | +| outputs | object | | No | +| status | string | | Yes | +| total_steps | | | No | +| total_tokens | | | No | +| workflow_id | string | | Yes | diff --git a/api/openapi/markdown/web-swagger.md b/api/openapi/markdown/web-swagger.md new file mode 100644 index 0000000000..c9b3b31357 --- /dev/null +++ b/api/openapi/markdown/web-swagger.md @@ -0,0 +1,1224 @@ +# Web API +Public APIs for web applications including file uploads, chat interactions, and app management + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## web +Web application API operations + 
+### /audio-to-text + +#### POST +##### Summary + +Convert audio to text + +##### Description + +Convert audio file to text using speech-to-text service. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal Server Error | + +### /chat-messages + +#### POST +##### Description + +Create a chat message for conversational applications. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /completion-messages + +#### POST +##### Description + +Create a completion message for text generation applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /conversations + +#### GET +##### Description + +Retrieve paginated list of conversations for a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last conversation ID for pagination | No | string | +| limit | query | Number of conversations to return (1-100) | No | integer | +| pinned | query | Filter by pinned status | No | string | +| sort_by | query | Sort order | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id} + +#### DELETE +##### Description + +Delete a specific conversation. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/name + +#### POST +##### Description + +Rename a specific conversation with a custom name or auto-generate one. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | +| auto_generate | query | Auto-generate conversation name | No | boolean | +| name | query | New conversation name | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/pin + +#### PATCH +##### Description + +Pin a specific conversation to keep it at the top of the list. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation pinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/unpin + +#### PATCH +##### Description + +Unpin a specific conversation to remove it from the top of the list. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation unpinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /email-code-login + +#### POST +##### Description + +Send email verification code for login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginSendPayload](#emailcodeloginsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | + +### /email-code-login/validity + +#### POST +##### Description + +Verify email code and complete login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginVerifyPayload](#emailcodeloginverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code verified and login successful | +| 400 | Bad request - invalid code or token | +| 401 | Invalid token or expired code | +| 404 | Account not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in web applications + +##### Description + +Upload a file for use in web applications +Accepts file uploads for use within web applications, supporting +multiple file types with automatic validation and storage. 
+ +Args: + app_model: The associated application model + end_user: The end user uploading the file + +Form Parameters: + file: The file to upload (required) + source: Optional source type (datasets or None) + +Returns: + dict: File information including ID, URL, and metadata + int: HTTP status code 201 for success + +Raises: + NoFileUploadedError: No file provided in request + TooManyFilesError: Multiple files provided (only one allowed) + FilenameNotExistsError: File has no filename + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - invalid file or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset email sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | +| 429 | Too many requests - rate limit exceeded | + +### /forgot-password/resets + +#### POST +##### Description + +Reset user password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset successfully | +| 400 | Bad request - invalid parameters or password mismatch | +| 401 | Invalid or expired token | +| 404 | Account not found | + +### 
/forgot-password/validity + +#### POST +##### Description + +Verify password reset token validity + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Token is valid | +| 400 | Bad request - invalid token format | +| 401 | Invalid or expired token | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by token + +##### Description + +GET /api/form/human_input/ + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by token + +##### Description + +POST /api/form/human_input/ + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Description + +Authenticate user for web application access + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Authentication successful | +| 400 | Bad request - invalid email or password format | +| 401 | Authentication failed - email or password mismatch | +| 403 | Account banned or login 
disabled | +| 404 | Account not found | + +### /login/status + +#### GET +##### Description + +Check login status + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Login status | +| 401 | Login status | + +### /logout + +#### POST +##### Description + +Logout user from web application + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Logout successful | + +### /messages + +#### GET +##### Description + +Retrieve paginated list of messages from a conversation in a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| conversation_id | query | Conversation UUID | Yes | string | +| first_id | query | First message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /messages/{message_id}/feedbacks + +#### POST +##### Description + +Submit feedback (like/dislike) for a specific message. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | +| content | query | Feedback content | No | string | +| rating | query | Feedback rating | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/more-like-this + +#### GET +##### Description + +Generate a new completion similar to an existing message (completion apps only). 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageMoreLikeThisQuery](#messagemorelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested follow-up questions after a message (chat apps only). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a chat app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found or Conversation Not Found | +| 500 | Internal Server Error | + +### /meta + +#### GET +##### Summary + +Get app meta + +##### Description + +Retrieve the metadata for a specific app. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve the parameters for a specific app. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /passport + +#### GET +##### Description + +Get authentication passport for web application access + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Passport retrieved successfully | +| 401 | Unauthorized - missing app code or invalid authentication | +| 404 | Application or user not found | + +### /remote-files/upload + +#### POST +##### Summary + +Upload a file from a remote URL + +##### Description + +Upload a file from a remote URL +Downloads a file from the provided remote URL and uploads it +to the platform storage for use in web applications. + +Args: + app_model: The associated application model + end_user: The end user making the request + +JSON Parameters: + url: The remote URL to download the file from (required) + +Returns: + dict: File information including ID, signed URL, and metadata + int: HTTP status code 201 for success + +Raises: + RemoteFileUploadError: Failed to fetch file from remote URL + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Remote file uploaded successfully | [FileWithSignedUrl](#filewithsignedurl) | +| 400 | Bad request - invalid URL or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | +| 500 | Failed to fetch remote file | | + +### /remote-files/{url} + +#### GET +##### Summary + +Get information about a remote file + +##### Description + +Get information about a remote file +Retrieves basic information about a file located at a remote URL, +including content type and content length. 
+ +Args: + app_model: The associated application model + end_user: The end user making the request + url: URL-encoded path to the remote file + +Returns: + dict: Remote file information including type and length + +Raises: + HTTPException: If the remote file cannot be accessed + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Remote file information retrieved successfully | [RemoteFileInfo](#remotefileinfo) | +| 400 | Bad request - invalid URL | | +| 404 | Remote file not found | | +| 500 | Failed to fetch remote file | | + +### /saved-messages + +#### GET +##### Description + +Retrieve paginated list of saved messages for a completion application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +#### POST +##### Description + +Save a specific message for later reference. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | query | Message UUID to save | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message saved successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /saved-messages/{message_id} + +#### DELETE +##### Description + +Remove a message from saved messages. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Message removed successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Retrieve app site information and configuration. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /system-features + +#### GET +##### Summary + +Get system feature flags and configuration + +##### Description + +Get system feature flags and configuration +Returns the current system feature flags and configuration +that control various functionalities across the platform. + +Returns: + dict: System feature configuration object + +This endpoint is akin to the `SystemFeatureApi` endpoint in api/controllers/console/feature.py, +except it is intended for use by the web app, instead of the console dashboard. 
+ +NOTE: This endpoint is unauthenticated by design, as it provides system features +data required for webapp initialization. + +Authentication would create circular dependency (can't authenticate without webapp loading). + +Only non-sensitive configuration data should be returned by this endpoint. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | System features retrieved successfully | +| 500 | Internal server error | + +### /text-to-audio + +#### POST +##### Summary + +Convert text to audio + +##### Description + +Convert text to audio using text-to-speech service. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 500 | Internal Server Error | + +### /webapp/access-mode + +#### GET +##### Description + +Retrieve the access mode for a web application (public or restricted). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| appCode | query | Application code | No | string | +| appId | query | Application ID | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 500 | Internal Server Error | + +### /webapp/permission + +#### GET +##### Description + +Check if user has permission to access a web application. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| appId | query | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 500 | Internal Server Error | + +### /workflows/run + +#### POST +##### Summary + +Run workflow + +##### Description + +Execute a workflow with provided inputs and files. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Description + +Stop a running workflow task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +--- +## default +Default namespace + +### /workflow/{task_id}/events + +#### GET +##### Summary + +Get workflow execution events stream after resume + +##### Description + +GET /api/workflow//events + +Returns Server-Sent Events stream. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### AppAccessModeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| appCode | | Application code | No | +| appId | | Application ID | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Files to be processed | No | +| inputs | object | Input variables for the chat | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query/message | Yes | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Files to be processed | No | +| inputs | object | Input variables for the completion | Yes | +| query | string | Query text for completion | No | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | +| sort_by | string | *Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### EmailCodeLoginSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### 
EmailCodeLoginVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### FileWithSignedUrl + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| size | integer | | Yes | +| url | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| password | string | | Yes | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- 
| +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MessageMoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | Yes | + +#### RemoteFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_length | integer | | Yes | +| file_type | string | | Yes | + +#### RemoteFileUploadPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| url | string (uri) | Remote file URL | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### SavedMessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py index 1b97746dea..0900dfda97 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py @@ -3,6 +3,7 @@ from collections.abc import Mapping from typing import Any, cast from unittest.mock import MagicMock +import pytest from dify_trace_aliyun.entities.semconv import ( GEN_AI_FRAMEWORK, GEN_AI_SESSION_ID, @@ -31,7 +32,7 @@ from graphon.enums import WorkflowNodeExecutionStatus from models import EndUser -def test_get_user_id_from_message_data_no_end_user(monkeypatch): +def 
test_get_user_id_from_message_data_no_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = None @@ -39,7 +40,7 @@ def test_get_user_id_from_message_data_no_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "account_id" -def test_get_user_id_from_message_data_with_end_user(monkeypatch): +def test_get_user_id_from_message_data_with_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -57,7 +58,7 @@ def test_get_user_id_from_message_data_with_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "session_id" -def test_get_user_id_from_message_data_end_user_not_found(monkeypatch): +def test_get_user_id_from_message_data_end_user_not_found(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -111,7 +112,7 @@ def test_get_workflow_node_status(): assert status.status_code == StatusCode.UNSET -def test_create_links_from_trace_id(monkeypatch): +def test_create_links_from_trace_id(monkeypatch: pytest.MonkeyPatch): # Mock create_link mock_link = MagicMock(spec=Link) import dify_trace_aliyun.data_exporter.traceclient diff --git a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py index 952f10c34f..95e27c791f 100644 --- a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py +++ b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py @@ -40,7 +40,7 @@ def langfuse_config(): @pytest.fixture -def trace_instance(langfuse_config, monkeypatch): +def trace_instance(langfuse_config, monkeypatch: pytest.MonkeyPatch): # Mock Langfuse client 
to avoid network calls mock_client = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", lambda **kwargs: mock_client) @@ -49,7 +49,7 @@ def trace_instance(langfuse_config, monkeypatch): return instance -def test_init(langfuse_config, monkeypatch): +def test_init(langfuse_config, monkeypatch: pytest.MonkeyPatch): mock_langfuse = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", mock_langfuse) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -64,7 +64,7 @@ def test_init(langfuse_config, monkeypatch): assert instance.file_base_url == "http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -114,7 +114,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info trace_info = WorkflowTraceInfo( workflow_id="wf-1", @@ -218,7 +218,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert other_span.level == LevelEnum.ERROR -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -259,7 +259,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): assert trace_data.name == TraceTaskName.WORKFLOW_TRACE -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -287,7 +287,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): 
trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -331,7 +331,7 @@ def test_message_trace_basic(trace_instance, monkeypatch): assert gen_data.usage.total == 30 -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -636,7 +636,7 @@ def test_langfuse_trace_entity_with_list_dict_input(): assert data.input[0]["content"] == "hello" -def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch, caplog): +def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): # Setup trace info to trigger LLM node usage extraction trace_info = WorkflowTraceInfo( workflow_id="wf-1", diff --git a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py index 45e5894e4a..ee59acb17e 100644 --- a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py +++ b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py @@ -35,7 +35,7 @@ def langsmith_config(): @pytest.fixture -def trace_instance(langsmith_config, monkeypatch): +def trace_instance(langsmith_config, monkeypatch: pytest.MonkeyPatch): # Mock LangSmith client mock_client = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", lambda **kwargs: mock_client) @@ -44,7 +44,7 @@ def trace_instance(langsmith_config, monkeypatch): return instance -def test_init(langsmith_config, monkeypatch): +def test_init(langsmith_config, 
monkeypatch: pytest.MonkeyPatch): mock_client_class = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", mock_client_class) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -57,7 +57,7 @@ def test_init(langsmith_config, monkeypatch): assert instance.file_base_url == "http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -107,7 +107,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace(trace_instance, monkeypatch): +def test_workflow_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info workflow_data = MagicMock() workflow_data.created_at = _dt() @@ -223,7 +223,7 @@ def test_workflow_trace(trace_instance, monkeypatch): assert call_args[4].run_type == LangSmithRunType.retriever -def test_workflow_trace_no_start_time(trace_instance, monkeypatch): +def test_workflow_trace_no_start_time(trace_instance, monkeypatch: pytest.MonkeyPatch): workflow_data = MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) @@ -266,7 +266,7 @@ def test_workflow_trace_no_start_time(trace_instance, monkeypatch): assert trace_instance.add_run.called -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = MagicMock(spec=WorkflowTraceInfo) trace_info.trace_id = "trace-1" trace_info.message_id = None @@ -290,7 +290,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace(trace_instance, monkeypatch): +def test_message_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ 
-516,7 +516,7 @@ def test_update_run_error(trace_instance): trace_instance.update_run(update_data) -def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): workflow_data = MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) diff --git a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py index 46c9750a5d..324f894b25 100644 --- a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py +++ b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py @@ -614,7 +614,7 @@ class TestMessageTrace: span.set_status.assert_called_once() span.add_event.assert_called_once() - def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch: pytest.MonkeyPatch): span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" diff --git a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py index eefed3c78c..5daaa7132c 100644 --- a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py +++ b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py @@ -35,7 +35,7 @@ def opik_config(): @pytest.fixture -def trace_instance(opik_config, monkeypatch): +def trace_instance(opik_config, monkeypatch: pytest.MonkeyPatch): mock_client = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", lambda **kwargs: mock_client) @@ -65,7 +65,7 @@ def test_prepare_opik_uuid(): assert result is not None -def 
test_init(opik_config, monkeypatch): +def test_init(opik_config, monkeypatch: pytest.MonkeyPatch): mock_opik = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", mock_opik) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -82,7 +82,7 @@ def test_init(opik_config, monkeypatch): assert instance.project == opik_config.project -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -132,7 +132,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "fb05c7cd-6cec-4add-8a84-df03a408b4ce" WORKFLOW_RUN_ID = "33c67568-7a8a-450e-8916-a5f135baeaef" @@ -221,7 +221,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert trace_instance.add_span.call_count >= 1 -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "f0708b36-b1d7-42b3-a876-1d01b7d8f1a3" WORKFLOW_RUN_ID = "d42ec285-c2fd-4248-8866-5c9386b101ac" @@ -265,7 +265,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): trace_instance.add_trace.assert_called_once() -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="5745f1b8-f8e6-4859-8110-996acb6c8d6a", tenant_id="tenant-1", @@ -293,7 +293,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, 
monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability MESSAGE_DATA_ID = "e3a26712-8cac-4a25-94a4-a3bff21ee3ab" CONVERSATION_ID = "9d3f3751-7521-4c19-9307-20e3cf6789a3" @@ -340,7 +340,7 @@ def test_message_trace_basic(trace_instance, monkeypatch): trace_instance.add_span.assert_called_once() -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "85411059-79fb-4deb-a76c-c2e215f1b97e" message_data.from_account_id = "acc-1" @@ -614,7 +614,7 @@ def test_get_project_url_error(trace_instance): trace_instance.get_project_url() -def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): trace_info = WorkflowTraceInfo( workflow_id="86a52565-4a6b-4a1b-9bfd-98e4595e70de", tenant_id="66e8e918-472e-4b69-8051-12502c34fc07", diff --git a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py index 6028d0c550..30646815d8 100644 --- a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py +++ b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py @@ -267,14 +267,14 @@ class TestInit: with pytest.raises(ValueError, match="Weave login failed"): WeaveDataTrace(config) - def test_init_files_url_from_env(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_from_env(self, mock_wandb, mock_weave, monkeypatch: pytest.MonkeyPatch): """Test FILES_URL is read from environment.""" monkeypatch.setenv("FILES_URL", "http://files.example.com") config = _make_weave_config() instance = WeaveDataTrace(config) assert instance.file_base_url == 
"http://files.example.com" - def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch: pytest.MonkeyPatch): """Test FILES_URL defaults to http://127.0.0.1:5001.""" monkeypatch.delenv("FILES_URL", raising=False) config = _make_weave_config() @@ -302,7 +302,7 @@ class TestGetProjectUrl: url = instance.get_project_url() assert url == "https://wandb.ai/my-project" - def test_get_project_url_exception_raises(self, trace_instance, monkeypatch): + def test_get_project_url_exception_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Raises ValueError when exception occurs in get_project_url.""" monkeypatch.setattr(trace_instance, "entity", None) monkeypatch.setattr(trace_instance, "project_name", None) @@ -583,7 +583,7 @@ class TestFinishCall: class TestWorkflowTrace: - def _setup_repo(self, monkeypatch, nodes=None): + def _setup_repo(self, monkeypatch: pytest.MonkeyPatch, nodes=None): """Helper to patch session/repo dependencies.""" if nodes is None: nodes = [] @@ -599,7 +599,7 @@ class TestWorkflowTrace: monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) return repo - def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with no nodes and no message_id.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -614,7 +614,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 assert trace_instance.finish_call.call_count == 1 - def test_workflow_trace_with_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_with_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with message_id creates both message and workflow runs.""" 
self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -629,7 +629,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_workflow_trace_with_node_execution(self, trace_instance, monkeypatch): + def test_workflow_trace_with_node_execution(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace iterates node executions and creates node runs.""" node = _make_node( id="node-1", @@ -652,7 +652,7 @@ class TestWorkflowTrace: # workflow run + node run = 2 calls assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch): + def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """LLM node uses process_data prompts as inputs.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -680,7 +680,7 @@ class TestWorkflowTrace: # The key "messages" should be present (validator transforms the list) assert "messages" in node_run.inputs - def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch): + def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Non-LLM node uses node_execution.inputs directly.""" node = _make_node( node_type=BuiltinNodeTypes.TOOL, @@ -701,7 +701,7 @@ class TestWorkflowTrace: node_run = node_call_args[0][0] assert node_run.inputs.get("tool_input") == "val" - def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch): + def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Raises ValueError when app_id is missing from metadata.""" monkeypatch.setattr("dify_trace_weave.weave_trace.sessionmaker", lambda bind: MagicMock()) monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) @@ -714,7 
+714,7 @@ class TestWorkflowTrace: with pytest.raises(ValueError, match="No app_id found in trace_info metadata"): trace_instance.workflow_trace(trace_info) - def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, monkeypatch): + def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """start_time defaults to datetime.now() when None.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -727,7 +727,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 - def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch): + def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Node with created_at=None uses datetime.now().""" node = _make_node(created_at=None, elapsed_time=0.5) self._setup_repo(monkeypatch, nodes=[node]) @@ -740,7 +740,7 @@ class TestWorkflowTrace: trace_instance.workflow_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch): + def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Chat mode LLM node adds ls_provider and ls_model_name to attributes.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -765,7 +765,7 @@ class TestWorkflowTrace: assert node_run.attributes.get("ls_provider") == "openai" assert node_run.attributes.get("ls_model_name") == "gpt-4" - def test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch): + def test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Nodes are sorted by created_at before processing.""" node1 = _make_node(id="node-b", created_at=_dt() + timedelta(seconds=2)) node2 = _make_node(id="node-a", created_at=_dt()) 
@@ -799,7 +799,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) trace_instance.start_call.assert_not_called() - def test_basic_message_trace(self, trace_instance, monkeypatch): + def test_basic_message_trace(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace creates message run and llm child run.""" monkeypatch.setattr( "dify_trace_weave.weave_trace.db.session.get", @@ -816,7 +816,7 @@ class TestMessageTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_message_trace_with_file_data(self, trace_instance, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace appends file URL to file_list.""" file_data = MagicMock() file_data.url = "path/to/file.png" @@ -839,7 +839,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert "http://files.test/path/to/file.png" in message_run.file_list - def test_message_trace_with_end_user(self, trace_instance, monkeypatch): + def test_message_trace_with_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace looks up end user and sets end_user_id attribute.""" end_user = MagicMock() end_user.session_id = "session-xyz" @@ -862,7 +862,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert message_run.attributes.get("end_user_id") == "session-xyz" - def test_message_trace_no_end_user(self, trace_instance, monkeypatch): + def test_message_trace_no_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles when from_end_user_id is None.""" mock_db = MagicMock() mock_db.session.get.return_value = None @@ -880,7 +880,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, 
monkeypatch): + def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """trace_id falls back to message_id when trace_id is None.""" mock_db = MagicMock() mock_db.session.get.return_value = None @@ -895,7 +895,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert message_run.id == "msg-1" - def test_message_trace_file_list_none(self, trace_instance, monkeypatch): + def test_message_trace_file_list_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles file_list=None gracefully.""" mock_db = MagicMock() mock_db.session.get.return_value = None diff --git a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py index a907f918c3..37b2331f0f 100644 --- a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py +++ b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py @@ -20,7 +20,7 @@ def test_validate_distance_function_rejects_unsupported_values(): factory._validate_distance_function("dot_product") -def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch): +def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( id="dataset-1", @@ -45,7 +45,7 @@ def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch assert vector_cls.call_args.kwargs["collection_name"] == "existing_collection" -def test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch): +def test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( 
id="dataset-2", diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py index d1d471761d..2e8052b7dc 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py @@ -83,7 +83,7 @@ def test_get_type_is_analyticdb(): assert vector.get_type() == "analyticdb" -def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): +def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) @@ -109,7 +109,7 @@ def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): assert dataset.index_struct is not None -def test_factory_builds_sql_config_when_host_is_present(monkeypatch): +def test_factory_builds_sql_config_when_host_is_present(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace( id="dataset-2", index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py index d2d735ae3e..26bd385333 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py @@ -24,7 +24,7 @@ def _request_class(name: str): return _Request -def _install_openapi_stubs(monkeypatch): +def _install_openapi_stubs(monkeypatch: pytest.MonkeyPatch): gpdb_package = types.ModuleType("alibabacloud_gpdb20160503") gpdb_package.__path__ = [] gpdb_models = types.ModuleType("alibabacloud_gpdb20160503.models") @@ -130,7 +130,7 @@ def 
test_openapi_config_to_client_params(): assert params["read_timeout"] == 60000 -def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): +def test_init_creates_openapi_client_and_runs_initialize(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) initialize_mock = MagicMock() monkeypatch.setattr(openapi_module.AnalyticdbVectorOpenAPI, "_initialize", initialize_mock) @@ -145,7 +145,7 @@ def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): initialize_mock.assert_called_once_with() -def test_initialize_skips_when_cached(monkeypatch): +def test_initialize_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -164,7 +164,7 @@ def test_initialize_skips_when_cached(monkeypatch): vector._create_namespace_if_not_exists.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -184,7 +184,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_initialize_vector_database_calls_openapi_client(monkeypatch): +def test_initialize_vector_database_calls_openapi_client(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -199,7 +199,7 @@ def test_initialize_vector_database_calls_openapi_client(monkeypatch): assert request.manager_account_password == "password" -def test_create_namespace_creates_when_namespace_not_found(monkeypatch): +def test_create_namespace_creates_when_namespace_not_found(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ 
-211,7 +211,7 @@ def test_create_namespace_creates_when_namespace_not_found(monkeypatch): vector._client.create_namespace.assert_called_once() -def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): +def test_create_namespace_raises_on_unexpected_api_error(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -222,7 +222,7 @@ def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): vector._create_namespace_if_not_exists() -def test_create_namespace_noop_when_namespace_exists(monkeypatch): +def test_create_namespace_noop_when_namespace_exists(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -234,7 +234,7 @@ def test_create_namespace_noop_when_namespace_exists(monkeypatch): vector._client.create_namespace.assert_not_called() -def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): +def test_create_collection_if_not_exists_creates_when_missing(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -255,7 +255,7 @@ def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): +def test_create_collection_if_not_exists_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -274,7 +274,7 @@ def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): vector._client.create_collection.assert_not_called() -def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): +def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch: 
pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -293,7 +293,7 @@ def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): vector.create_collection_if_not_exists(embedding_dimension=512) -def test_openapi_add_delete_and_search_methods(monkeypatch): +def test_openapi_add_delete_and_search_methods(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -348,7 +348,7 @@ def test_openapi_add_delete_and_search_methods(monkeypatch): assert docs_by_text[0].page_content == "high" -def test_text_exists_returns_false_when_matches_empty(monkeypatch): +def test_text_exists_returns_false_when_matches_empty(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -361,7 +361,7 @@ def test_text_exists_returns_false_when_matches_empty(monkeypatch): assert vector.text_exists("missing-id") is False -def test_openapi_delete_success(monkeypatch): +def test_openapi_delete_success(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -372,7 +372,7 @@ def test_openapi_delete_success(monkeypatch): vector._client.delete_collection.assert_called_once() -def test_openapi_delete_propagates_errors(monkeypatch): +def test_openapi_delete_propagates_errors(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py index 49a2ae72d0..cd255b37cf 100644 
--- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py @@ -53,7 +53,7 @@ def test_sql_config_rejects_min_connection_greater_than_max_connection(): AnalyticdbVectorBySqlConfig.model_validate(values) -def test_initialize_skips_when_cache_exists(monkeypatch): +def test_initialize_skips_when_cache_exists(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -70,7 +70,7 @@ def test_initialize_skips_when_cache_exists(monkeypatch): vector._initialize_vector_database.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -88,7 +88,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): sql_module.redis_client.set.assert_called_once() -def test_create_connection_pool_uses_psycopg2_pool(monkeypatch): +def test_create_connection_pool_uses_psycopg2_pool(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -119,7 +119,7 @@ def test_get_cursor_context_manager_handles_connection_lifecycle(): pool.putconn.assert_called_once_with(connection) -def test_add_texts_inserts_only_documents_with_metadata(monkeypatch): +def test_add_texts_inserts_only_documents_with_metadata(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.table_name = "dify.collection" @@ -273,7 +273,7 @@ def test_delete_drops_table(): cursor.execute.assert_called_once() -def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch): +def 
test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch: pytest.MonkeyPatch): config = AnalyticdbVectorBySqlConfig(**_config_values()) created_pool = MagicMock() @@ -288,7 +288,7 @@ def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypat assert vector.pool is created_pool -def test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch): +def test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -326,7 +326,7 @@ def test_initialize_vector_database_handles_existing_database_and_search_config( assert any("CREATE SCHEMA IF NOT EXISTS dify" in call.args[0] for call in worker_cursor.execute.call_args_list) -def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch): +def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -353,7 +353,7 @@ def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(mon worker_connection.rollback.assert_called_once() -def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch): +def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" @@ -381,7 +381,7 @@ def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeyp sql_module.redis_client.set.assert_called_once() -def 
test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch): +def test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" diff --git a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py index 851c09f47a..f0dddee3b9 100644 --- a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py +++ b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py @@ -121,7 +121,7 @@ def _build_fake_pymochow_modules(): @pytest.fixture -def baidu_module(monkeypatch): +def baidu_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymochow_modules().items(): monkeypatch.setitem(sys.modules, name, module) import dify_vdb_baidu.baidu_vector as module @@ -254,7 +254,7 @@ def test_search_methods_delegate_to_database_table(baidu_module): assert vector._get_search_res.call_count == 2 -def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch): +def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch: pytest.MonkeyPatch): factory = baidu_module.BaiduVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) monkeypatch.setattr(baidu_module.Dataset, "gen_collection_name_by_id", lambda _id: "AUTO_COLLECTION") @@ -279,7 +279,7 @@ def test_factory_initializes_collection_name_and_index_struct(baidu_module, monk assert dataset.index_struct is not None -def test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch): +def test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch: pytest.MonkeyPatch): init_client = MagicMock(return_value="client") init_database = MagicMock(return_value="database") 
monkeypatch.setattr(baidu_module.BaiduVector, "_init_client", init_client) @@ -372,7 +372,7 @@ def test_get_search_result_handles_invalid_metadata_json(baidu_module): assert "document_id" not in docs[0].metadata -def test_init_client_constructs_configuration_and_client(baidu_module, monkeypatch): +def test_init_client_constructs_configuration_and_client(baidu_module, monkeypatch: pytest.MonkeyPatch): credentials = MagicMock(return_value="credentials") configuration = MagicMock(return_value="configuration") client_cls = MagicMock(return_value="client") @@ -411,7 +411,7 @@ def test_init_database_raises_for_unknown_create_database_error(baidu_module): vector._init_database() -def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch): +def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = SimpleNamespace( @@ -460,7 +460,7 @@ def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypat vector._wait_for_index_ready.assert_called_once_with(table, 3600) -def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch): +def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._db = MagicMock() @@ -493,7 +493,7 @@ def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypat vector._create_table(3) -def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch): +def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = 
SimpleNamespace( @@ -524,7 +524,9 @@ def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, vector._create_table(3) -def test_factory_uses_existing_collection_prefix_when_index_struct_exists(baidu_module, monkeypatch): +def test_factory_uses_existing_collection_prefix_when_index_struct_exists( + baidu_module, monkeypatch: pytest.MonkeyPatch +): factory = baidu_module.BaiduVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py index b209c9df96..f18f9a6561 100644 --- a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py +++ b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py @@ -44,7 +44,7 @@ def _build_fake_chroma_modules(): @pytest.fixture -def chroma_module(monkeypatch): +def chroma_module(monkeypatch: pytest.MonkeyPatch): fake_chroma = _build_fake_chroma_modules() monkeypatch.setitem(sys.modules, "chromadb", fake_chroma) import dify_vdb_chroma.chroma_vector as module @@ -73,7 +73,7 @@ def test_chroma_config_to_params_builds_expected_payload(chroma_module): assert params["settings"].chroma_client_auth_credentials == "credentials" -def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch): +def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -173,7 +173,7 @@ def test_search_by_full_text_returns_empty_list(chroma_module): assert vector.search_by_full_text("query") == [] -def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch): +def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch: pytest.MonkeyPatch): factory = chroma_module.ChromaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", 
index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py index a7473f1b91..4f8395e475 100644 --- a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py +++ b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py @@ -45,7 +45,7 @@ def _build_fake_clickzetta_module(): @pytest.fixture -def clickzetta_module(monkeypatch): +def clickzetta_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "clickzetta", _build_fake_clickzetta_module()) import dify_vdb_clickzetta.clickzetta_vector as module @@ -218,7 +218,7 @@ def test_search_by_like_returns_documents_with_default_score(clickzetta_module): assert docs[0].metadata["score"] == 0.5 -def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): +def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch: pytest.MonkeyPatch): factory = clickzetta_module.ClickzettaVectorFactory() dataset = SimpleNamespace(id="dataset-1") @@ -243,7 +243,7 @@ def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): assert vector_cls.call_args.kwargs["collection_name"] == "collection" -def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch): +def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch: pytest.MonkeyPatch): clickzetta_module.ClickzettaConnectionPool._instance = None monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) @@ -255,7 +255,7 @@ def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch assert "username:instance:service:workspace:cluster:dify" in key -def test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch): +def 
test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -274,7 +274,7 @@ def test_connection_pool_create_connection_retries_and_configures(clickzetta_mod pool._configure_connection.assert_called_once_with(connection) -def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch): +def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -318,7 +318,7 @@ def test_connection_pool_configure_connection_swallows_errors(clickzetta_module) monkeypatch.undo() -def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch): +def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -360,7 +360,7 @@ def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monk assert pool._shutdown is True -def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch): +def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False pool._cleanup_expired_connections = MagicMock(side_effect=lambda: setattr(pool, "_shutdown", True)) @@ -384,7 +384,7 @@ def 
test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module pool._cleanup_expired_connections.assert_called_once() -def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch): +def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() pool.get_connection.return_value = "conn" monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "get_instance", MagicMock(return_value=pool)) @@ -405,7 +405,7 @@ def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypat assert vector._ensure_connection() == "conn" -def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch): +def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch: pytest.MonkeyPatch): class _Thread: def __init__(self, target, daemon): self.target = target @@ -579,7 +579,7 @@ def test_create_inverted_index_branches(clickzetta_module): vector._create_inverted_index(cursor) -def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch): +def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch: pytest.MonkeyPatch): vector = clickzetta_module.ClickzettaVector.__new__(clickzetta_module.ClickzettaVector) vector._config = _config(clickzetta_module) vector._config.batch_size = 2 @@ -811,7 +811,7 @@ def test_clickzetta_pool_cleanup_and_shutdown_edge_paths(clickzetta_module): assert pool._shutdown is True -def test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch): +def test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False diff --git a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py 
b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py index 7e5c40b8f2..d474b566d3 100644 --- a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py +++ b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py @@ -150,7 +150,7 @@ def _build_fake_couchbase_modules(): @pytest.fixture -def couchbase_module(monkeypatch): +def couchbase_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_couchbase_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -194,7 +194,7 @@ def test_init_sets_cluster_handles(couchbase_module): vector._cluster.wait_until_ready.assert_called_once() -def test_create_and_create_collection_branches(couchbase_module, monkeypatch): +def test_create_and_create_collection_branches(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector.__new__(couchbase_module.CouchbaseVector) vector._collection_name = "collection_1" vector._client_config = _config(couchbase_module) @@ -319,7 +319,7 @@ def test_search_methods_and_format_metadata(couchbase_module): assert vector._format_metadata({"metadata.a": 1, "plain": 2}) == {"a": 1, "plain": 2} -def test_delete_collection_and_factory(couchbase_module, monkeypatch): +def test_delete_collection_and_factory(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector("collection_1", _config(couchbase_module)) scopes = [ SimpleNamespace(collections=[SimpleNamespace(name="other")]), diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py index f81ed6beea..91cc2e0fdb 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py @@ -28,7 +28,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture 
-def elasticsearch_ja_module(monkeypatch): +def elasticsearch_ja_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -39,7 +39,7 @@ def elasticsearch_ja_module(monkeypatch): return importlib.reload(ja_module) -def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): +def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -57,7 +57,7 @@ def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): elasticsearch_ja_module.redis_client.set.assert_not_called() -def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch): +def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -87,7 +87,7 @@ def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monk elasticsearch_ja_module.redis_client.set.assert_called_once() -def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch): +def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_ja_module.ElasticSearchJaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py index 48f1f6dc26..d54c105a0f 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py @@ -38,7 +38,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def 
elasticsearch_module(monkeypatch): +def elasticsearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -287,7 +287,7 @@ def test_search_by_vector_and_full_text(elasticsearch_module): assert "bool" in query -def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): +def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): elasticsearch_module.redis_client.set.assert_called_once() -def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch): +def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_module.ElasticSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py index f9a557ecce..8b197662e3 100644 --- a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py +++ b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py @@ -38,7 +38,7 @@ def _build_fake_hologres_modules(): @pytest.fixture -def hologres_module(monkeypatch): +def hologres_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_hologres_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -266,7 +266,7 @@ def test_delete_handles_existing_and_missing_tables(hologres_module): vector._client.drop_table.assert_called_once_with(vector.table_name) -def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch): +def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch: 
pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -281,7 +281,7 @@ def test_create_collection_returns_early_when_cache_hits(hologres_module, monkey hologres_module.redis_client.set.assert_not_called() -def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch): +def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -313,7 +313,7 @@ def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatc hologres_module.redis_client.set.assert_called_once() -def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch): +def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -331,7 +331,7 @@ def test_create_collection_raises_when_table_never_becomes_ready(hologres_module hologres_module.redis_client.set.assert_not_called() -def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch): +def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch: pytest.MonkeyPatch): factory = hologres_module.HologresVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py index ba3f14912b..a1617b6d43 100644 --- a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py +++ b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py @@ -29,7 +29,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def huawei_module(monkeypatch): +def huawei_module(monkeypatch: 
pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -155,7 +155,7 @@ def test_search_by_vector_and_full_text(huawei_module): assert docs[0].page_content == "text-hit" -def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch): +def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch: pytest.MonkeyPatch): class FakeDocument: def __init__(self, page_content, vector, metadata): self.page_content = page_content @@ -185,7 +185,7 @@ def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch assert docs == [] -def test_create_and_create_collection_paths(huawei_module, monkeypatch): +def test_create_and_create_collection_paths(huawei_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -218,7 +218,7 @@ def test_create_and_create_collection_paths(huawei_module, monkeypatch): huawei_module.redis_client.set.assert_called_once() -def test_huawei_factory_branches(huawei_module, monkeypatch): +def test_huawei_factory_branches(huawei_module, monkeypatch: pytest.MonkeyPatch): factory = huawei_module.HuaweiCloudVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py index 8c038e82b9..b4ea6ea6c1 100644 --- a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py +++ b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py @@ -23,7 +23,7 @@ def _build_fake_iris_module(): @pytest.fixture -def iris_module(monkeypatch): +def iris_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "iris", _build_fake_iris_module()) import dify_vdb_iris.iris_vector as module @@ -249,7 +249,7 @@ def test_iris_vector_init_get_cursor_and_create(iris_module): 
vector._create_collection.assert_called_once_with(2) -def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): +def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module)) @@ -297,7 +297,7 @@ def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): assert docs[0].metadata["score"] == pytest.approx(0.9) -def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): +def test_iris_vector_full_text_search_paths(iris_module, monkeypatch: pytest.MonkeyPatch): cfg = _config(iris_module, IRIS_TEXT_INDEX=True) with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", cfg) @@ -344,7 +344,7 @@ def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): assert vector_like.search_by_full_text("100%", top_k=1) == [] -def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch): +def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module, IRIS_TEXT_INDEX=True)) diff --git a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py index 238145c1d6..4a408d1b10 100644 --- a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py +++ b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py @@ -47,7 +47,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def lindorm_module(monkeypatch): +def lindorm_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -100,7 +100,7 @@ def 
test_to_opensearch_params_and_init(lindorm_module): assert vector_ugc._routing == "route" -def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch): +def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore( "collection", _config(lindorm_module), using_ugc=True, routing_value="route" ) @@ -301,7 +301,7 @@ def test_search_by_full_text_success_and_error(lindorm_module): vector.search_by_full_text("hello") -def test_create_collection_paths(lindorm_module, monkeypatch): +def test_create_collection_paths(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore("collection", _config(lindorm_module), using_ugc=False) with pytest.raises(ValueError, match="cannot be empty"): @@ -331,7 +331,7 @@ def test_create_collection_paths(lindorm_module, monkeypatch): vector._client.indices.create.assert_not_called() -def test_lindorm_factory_branches(lindorm_module, monkeypatch): +def test_lindorm_factory_branches(lindorm_module, monkeypatch: pytest.MonkeyPatch): factory = lindorm_module.LindormVectorStoreFactory() monkeypatch.setattr(lindorm_module.dify_config, "LINDORM_URL", "http://localhost:9200") diff --git a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py index c22f4304e5..762ec330b2 100644 --- a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py +++ b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py @@ -32,7 +32,7 @@ def _build_fake_mo_vector_modules(): @pytest.fixture -def matrixone_module(monkeypatch): +def matrixone_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_mo_vector_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -70,7 +70,7 @@ def test_matrixone_config_validation(matrixone_module, field, value, message): 
matrixone_module.MatrixoneConfig.model_validate(values) -def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch): +def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -86,7 +86,7 @@ def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, matrixone_module.redis_client.set.assert_called_once() -def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch): +def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -146,7 +146,7 @@ def test_get_type_and_create_delegate_to_add_texts(matrixone_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch): +def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -165,7 +165,7 @@ def test_get_client_handles_full_text_index_creation_error(matrixone_module, mon matrixone_module.redis_client.set.assert_not_called() -def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch): +def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch: pytest.MonkeyPatch): vector = matrixone_module.MatrixoneVector("collection_1", _valid_config(matrixone_module)) vector.client = MagicMock() monkeypatch.setattr(matrixone_module.uuid, "uuid4", lambda: "generated-uuid") @@ -224,7 +224,7 @@ def test_search_by_vector_builds_documents(matrixone_module): assert vector.client.query.call_args.kwargs["filter"] == {"document_id": {"$in": ["d-1"]}} -def 
test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch): +def test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch: pytest.MonkeyPatch): factory = matrixone_module.MatrixoneVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py index 36c0ed8f6f..730ff9f296 100644 --- a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py +++ b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py @@ -99,7 +99,7 @@ def _build_fake_pymilvus_modules(): @pytest.fixture -def milvus_module(monkeypatch): +def milvus_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymilvus_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -327,7 +327,7 @@ def test_process_search_results_and_search_methods(milvus_module): assert "document_id" in vector._client.search.call_args.kwargs["filter"] -def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch): +def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -351,7 +351,7 @@ def test_create_collection_cache_and_existing_collection(milvus_module, monkeypa milvus_module.redis_client.set.assert_called() -def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch): +def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -385,7 +385,7 @@ def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch) assert call_kwargs["consistency_level"] == "Session" -def test_factory_initializes_milvus_vector(milvus_module, monkeypatch): +def 
test_factory_initializes_milvus_vector(milvus_module, monkeypatch: pytest.MonkeyPatch): factory = milvus_module.MilvusVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py index 228ea92639..900c75fdab 100644 --- a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py +++ b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py @@ -38,7 +38,7 @@ def _build_fake_clickhouse_connect_module(): @pytest.fixture -def myscale_module(monkeypatch): +def myscale_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_clickhouse_connect_module() monkeypatch.setitem(sys.modules, "clickhouse_connect", fake_module) @@ -90,7 +90,7 @@ def test_delete_by_ids_short_circuits_on_empty_list(myscale_module): vector._client.command.assert_not_called() -def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch): +def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch: pytest.MonkeyPatch): factory = myscale_module.MyScaleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -160,7 +160,7 @@ def test_create_collection_builds_expected_sql(myscale_module): assert "INDEX text_idx text TYPE fts('tokenizer=unicode')" in sql -def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch): +def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch: pytest.MonkeyPatch): vector = myscale_module.MyScaleVector("collection_1", _config(myscale_module)) monkeypatch.setattr(myscale_module.uuid, "uuid4", lambda: "generated-uuid") docs = [ diff --git a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py index 31f9ff3e56..36393cc486 100644 --- 
a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py +++ b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py @@ -53,7 +53,7 @@ def _build_fake_pyobvector_module(): @pytest.fixture -def oceanbase_module(monkeypatch): +def oceanbase_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "pyobvector", _build_fake_pyobvector_module()) import dify_vdb_oceanbase.oceanbase_vector as module @@ -208,7 +208,7 @@ def test_create_delegates_to_collection_and_insert(oceanbase_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch): +def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -234,7 +234,7 @@ def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_mod vector.delete.assert_not_called() -def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch): +def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -271,7 +271,7 @@ def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, mo oceanbase_module.redis_client.set.assert_called_once() -def test_create_collection_error_paths(oceanbase_module, monkeypatch): +def test_create_collection_error_paths(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -308,7 +308,7 @@ def test_create_collection_error_paths(oceanbase_module, monkeypatch): vector._create_collection() -def test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch): +def 
test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -517,7 +517,7 @@ def test_delete_success_and_exception(oceanbase_module): vector.delete() -def test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch): +def test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch: pytest.MonkeyPatch): factory = oceanbase_module.OceanBaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py index 09abd625fc..57c9b14d9f 100644 --- a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py +++ b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def opengauss_module(monkeypatch): +def opengauss_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -88,7 +88,7 @@ def test_opengauss_config_validation_rejects_min_greater_than_max(opengauss_modu opengauss_module.OpenGaussConfig.model_validate(values) -def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): +def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -99,7 +99,7 @@ def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): assert vector.pool is pool -def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): +def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() 
monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -126,7 +126,7 @@ def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch): +def test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -158,7 +158,7 @@ def test_search_by_vector_validates_top_k(opengauss_module): vector.search_by_vector([0.1, 0.2], top_k=0) -def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch): +def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -200,7 +200,7 @@ def test_create_calls_collection_insert_and_index(opengauss_module): vector._create_index.assert_called_once_with(2) -def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch): +def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -220,7 +220,7 @@ def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_not_called() -def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch): +def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, 
"SimpleConnectionPool", MagicMock(return_value=pool)) @@ -245,7 +245,7 @@ def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, m assert any("embedding_cosine_embedding_collection_1_idx" in query for query in sql) -def test_add_texts_uses_execute_values(opengauss_module, monkeypatch): +def test_add_texts_uses_execute_values(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -342,7 +342,7 @@ def test_search_by_full_text_validates_top_k(opengauss_module): vector.search_by_full_text("query", top_k=0) -def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): +def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) lock = MagicMock() @@ -370,7 +370,7 @@ def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch): +def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch: pytest.MonkeyPatch): factory = opengauss_module.OpenGaussFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py index f2ed7cb6fb..b2b004a4de 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py @@ -59,7 +59,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def 
opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -95,7 +95,7 @@ class TestOpenSearchConfig: assert params["connection_class"].__name__ == "Urllib3HttpConnection" assert params["http_auth"] == ("admin", "password") - def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch): + def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch: pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py index 1c2921f85b..80bf20e820 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py @@ -58,7 +58,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -116,7 +116,7 @@ def test_config_validation_for_aws_auth_and_https_fields(opensearch_module): opensearch_module.OpenSearchConfig.model_validate(values) -def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch): +def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch: pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" @@ -167,7 +167,7 @@ def test_init_and_create_delegate_calls(opensearch_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch): +def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch: pytest.MonkeyPatch): vector = 
opensearch_module.OpenSearchVector("Collection_1", _config(opensearch_module, aws_service="es")) docs = [ Document(page_content="a", metadata={"doc_id": "1"}), @@ -308,7 +308,7 @@ def test_search_by_full_text_and_filters(opensearch_module): assert query["query"]["bool"]["filter"] == [{"terms": {"metadata.document_id": ["d-1"]}}] -def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch): +def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch) opensearch_module.redis_client.set.assert_called() -def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch): +def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch: pytest.MonkeyPatch): factory = opensearch_module.OpenSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py index 678cf876b0..46027c7e44 100644 --- a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py +++ b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py @@ -51,7 +51,7 @@ def _connection_with_cursor(cursor): @pytest.fixture -def oracle_module(monkeypatch): +def oracle_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_oracle_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -94,7 +94,7 @@ def test_oracle_config_validation_autonomous_requirements(oracle_module): ) -def test_init_and_get_type(oracle_module, monkeypatch): +def test_init_and_get_type(oracle_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(oracle_module.oracledb, "create_pool", 
MagicMock(return_value=pool)) vector = oracle_module.OracleVector("collection_1", _config(oracle_module)) @@ -139,7 +139,7 @@ def test_numpy_converters_and_type_handlers(oracle_module): assert out_float64.dtype == numpy.float64 -def test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch): +def test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): connect = MagicMock(return_value="connection") monkeypatch.setattr(oracle_module.oracledb, "connect", connect) @@ -173,7 +173,7 @@ def test_create_delegates_collection_and_insert(oracle_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch): +def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector.input_type_handler = MagicMock() @@ -279,7 +279,7 @@ def _fake_nltk_module(*, missing_data=False): return nltk, nltk_corpus -def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch): +def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" @@ -305,7 +305,7 @@ def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatc assert "doc_id_0" in en_params -def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch): +def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector._get_connection = MagicMock() @@ -320,7 +320,7 @@ def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, 
monkeyp vector.search_by_full_text("english query") -def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): +def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -346,7 +346,9 @@ def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): oracle_module.redis_client.set.assert_called_once() -def test_oracle_factory_init_vector_uses_existing_or_generated_collection(oracle_module, monkeypatch): +def test_oracle_factory_init_vector_uses_existing_or_generated_collection( + oracle_module, monkeypatch: pytest.MonkeyPatch +): factory = oracle_module.OracleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py index c3291f7f12..1841e88139 100644 --- a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py +++ b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py @@ -79,7 +79,7 @@ def _patch_both(monkeypatch, module, calls, execute_results=None): @pytest.fixture -def pgvecto_module(monkeypatch): +def pgvecto_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pgvecto_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -126,7 +126,7 @@ def test_collection_base_has_expected_annotations(pgvecto_module): assert {"id", "text", "meta", "vector"} <= set(annotations) -def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): +def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -145,7 +145,7 @@ def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): 
vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): +def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -169,7 +169,7 @@ def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): module.redis_client.set.assert_called() -def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): +def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] runtime_calls = [] @@ -241,7 +241,7 @@ def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): assert any("DROP TABLE IF EXISTS collection_1" in str(args[0]) for args, _ in runtime_calls) -def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): +def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -313,7 +313,7 @@ def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): assert vector.search_by_full_text("hello") == [] -def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch): +def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module factory = module.PGVectoRSFactory() dataset_with_index = SimpleNamespace( diff --git a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py index 99a6e00c16..38e472df63 100644 --- a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py +++ b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py @@ -336,7 +336,7 
@@ def test_create_delegates_collection_creation_and_insert(): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch): +def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name = "embedding_collection_1" @@ -387,7 +387,7 @@ def test_text_get_and_delete_methods(): assert any("DROP TABLE IF EXISTS embedding_collection_1" in sql for sql in executed_sql) -def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch): +def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name = "embedding_collection_1" cursor = MagicMock() @@ -464,7 +464,7 @@ def test_search_by_full_text_branches_for_bigm_and_standard(): assert "bigm_similarity" in cursor.execute.call_args_list[1].args[0] -def test_pgvector_factory_initializes_expected_collection_name(monkeypatch): +def test_pgvector_factory_initializes_expected_collection_name(monkeypatch: pytest.MonkeyPatch): factory = pgvector_module.PGVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py index 0ed5491fbe..89ee0a47f1 100644 --- a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py +++ b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py @@ -121,7 +121,7 @@ def _build_fake_qdrant_modules(): @pytest.fixture -def qdrant_module(monkeypatch): +def qdrant_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_qdrant_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -170,7 +170,7 @@ def test_init_and_basic_behaviour(qdrant_module): vector.add_texts.assert_called_once() -def 
test_create_collection_and_add_texts(qdrant_module, monkeypatch): +def test_create_collection_and_add_texts(qdrant_module, monkeypatch: pytest.MonkeyPatch): vector = qdrant_module.QdrantVector("collection_1", "group-1", _config(qdrant_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -288,7 +288,7 @@ def test_search_and_helper_methods(qdrant_module): assert doc.page_content == "doc" -def test_qdrant_factory_paths(qdrant_module, monkeypatch): +def test_qdrant_factory_paths(qdrant_module, monkeypatch: pytest.MonkeyPatch): factory = qdrant_module.QdrantVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py index f97ad1400a..c5f3a9f847 100644 --- a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py +++ b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py @@ -59,7 +59,7 @@ def _patch_both(monkeypatch, module, session): @pytest.fixture -def relyt_module(monkeypatch): +def relyt_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_relyt_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -97,7 +97,7 @@ def test_relyt_config_validation(relyt_module, field, value, message): relyt_module.RelytConfig.model_validate(values) -def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): +def test_init_get_type_and_create_delegate(relyt_module, monkeypatch: pytest.MonkeyPatch): engine = MagicMock() monkeypatch.setattr(relyt_module, "create_engine", MagicMock(return_value=engine)) vector = relyt_module.RelytVector("collection_1", _config(relyt_module), group_id="group-1") @@ -114,7 +114,7 @@ def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): +def 
test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -142,7 +142,7 @@ def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): relyt_module.redis_client.set.assert_called_once() -def test_add_texts_and_metadata_queries(relyt_module, monkeypatch): +def test_add_texts_and_metadata_queries(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector._group_id = "group-1" @@ -212,7 +212,7 @@ def test_delete_by_metadata_field_calls_delete_by_uuids(relyt_module): # 3. delete_by_ids translates to uuids -def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): +def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -225,7 +225,7 @@ def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): # 4. text_exists True -def test_text_exists_true(relyt_module, monkeypatch): +def test_text_exists_true(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -236,7 +236,7 @@ def test_text_exists_true(relyt_module, monkeypatch): # 5. text_exists False -def test_text_exists_false(relyt_module, monkeypatch): +def test_text_exists_false(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -284,7 +284,7 @@ def test_search_by_vector_filters_by_score_and_ids(relyt_module): # 8. 
delete commits session -def test_delete_drops_table(relyt_module, monkeypatch): +def test_delete_drops_table(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -295,7 +295,7 @@ def test_delete_drops_table(relyt_module, monkeypatch): session.execute.assert_called_once() -def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch): +def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch: pytest.MonkeyPatch): factory = relyt_module.RelytVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py index 62a11e0445..49d4b160cf 100644 --- a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py +++ b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py @@ -77,7 +77,7 @@ def _build_fake_tablestore_module(): @pytest.fixture -def tablestore_module(monkeypatch): +def tablestore_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_tablestore_module() monkeypatch.setitem(sys.modules, "tablestore", fake_module) @@ -177,7 +177,7 @@ def test_get_by_ids_text_exists_delete_and_wrappers(tablestore_module): vector._delete_table_if_exist.assert_called_once() -def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch): +def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch: pytest.MonkeyPatch): vector = tablestore_module.TableStoreVector("collection_1", _config(tablestore_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -289,7 +289,7 @@ def test_write_row_and_search_helpers(tablestore_module): assert "score" not in docs[0].metadata -def 
test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch): +def test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch: pytest.MonkeyPatch): factory = tablestore_module.TableStoreVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py index 299e40ee1e..e1fe227a29 100644 --- a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py +++ b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py @@ -136,7 +136,7 @@ def _build_fake_tencent_modules(): @pytest.fixture -def tencent_module(monkeypatch): +def tencent_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_tencent_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -187,7 +187,7 @@ def test_config_and_init_paths(tencent_module): assert vector._enable_hybrid_search is False -def test_create_collection_branches(tencent_module, monkeypatch): +def test_create_collection_branches(tencent_module, monkeypatch: pytest.MonkeyPatch): vector = tencent_module.TencentVector("collection_1", _config(tencent_module)) lock = MagicMock() @@ -279,7 +279,7 @@ def test_create_add_delete_and_search_behaviour(tencent_module): vector._client.drop_collection.assert_called_once() -def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch): +def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch: pytest.MonkeyPatch): factory = tencent_module.TencentVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py index bdbed2f740..ed03cbee88 100644 --- 
a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py +++ b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py @@ -46,7 +46,7 @@ def test_tidb_config_validation(tidb_module, field, value, message): tidb_module.TiDBVectorConfig.model_validate(values) -def test_init_get_type_and_distance_func(tidb_module, monkeypatch): +def test_init_get_type_and_distance_func(tidb_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(tidb_module, "create_engine", MagicMock(return_value="engine")) vector = tidb_module.TiDBVector("collection_1", _config(tidb_module), distance_func="L2") @@ -63,7 +63,7 @@ def test_init_get_type_and_distance_func(tidb_module, monkeypatch): assert vector._get_distance_func() == "VEC_COSINE_DISTANCE" -def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch): +def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch: pytest.MonkeyPatch): fake_tidb_vector = types.ModuleType("tidb_vector") fake_tidb_sqlalchemy = types.ModuleType("tidb_vector.sqlalchemy") @@ -107,7 +107,7 @@ def test_create_calls_collection_and_add_texts(tidb_module): assert vector._dimension == 2 -def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): +def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -127,7 +127,7 @@ def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): tidb_module.redis_client.set.assert_not_called() -def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch): +def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -160,7 +160,7 @@ def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monke 
tidb_module.redis_client.set.assert_called_once() -def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): +def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch: pytest.MonkeyPatch): class _InsertStmt: def __init__(self, table): self.table = table @@ -198,7 +198,7 @@ def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): @pytest.fixture -def tidb_vector_with_session(tidb_module, monkeypatch): +def tidb_vector_with_session(tidb_module, monkeypatch: pytest.MonkeyPatch): vector = tidb_module.TiDBVector.__new__(tidb_module.TiDBVector) vector._collection_name = "collection_1" vector._engine = MagicMock() @@ -354,7 +354,7 @@ def test_delete_by_metadata_field_does_nothing_when_no_ids(tidb_module): # Test search_by_vector filters and scores -def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch): +def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = [ ('{"doc_id":"id-1","document_id":"d-1"}', "text-1", 0.2), @@ -392,7 +392,7 @@ def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch): # Test delete drops table -def test_delete_drops_table(tidb_module, monkeypatch): +def test_delete_drops_table(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = None @@ -413,7 +413,7 @@ def test_delete_drops_table(tidb_module, monkeypatch): assert "DROP TABLE IF EXISTS collection_1" in drop_sql -def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch): +def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch: pytest.MonkeyPatch): factory = tidb_module.TiDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py index 
a884275c89..55d27ad264 100644 --- a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py +++ b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py @@ -36,7 +36,7 @@ def _build_fake_upstash_module(): @pytest.fixture -def upstash_module(monkeypatch): +def upstash_module(monkeypatch: pytest.MonkeyPatch): # Remove patched modules if present for modname in ["upstash_vector", "dify_vdb_upstash.upstash_vector"]: if modname in sys.modules: @@ -65,7 +65,7 @@ def test_upstash_config_validation(upstash_module, field, value, message): upstash_module.UpstashVectorConfig.model_validate(values) -def test_init_get_type_and_dimension(upstash_module, monkeypatch): +def test_init_get_type_and_dimension(upstash_module, monkeypatch: pytest.MonkeyPatch): vector = upstash_module.UpstashVector("collection_1", _config(upstash_module)) assert vector.get_type() == upstash_module.VectorType.UPSTASH @@ -162,7 +162,7 @@ def test_search_by_vector_filter_threshold_and_delete(upstash_module): vector.index.reset.assert_called_once() -def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch): +def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch: pytest.MonkeyPatch): factory = upstash_module.UpstashVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py index 4dfb956c00..32f47c67ed 100644 --- a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py +++ b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def vastbase_module(monkeypatch): +def vastbase_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -93,7 +93,7 @@ def 
test_vastbase_config_rejects_invalid_connection_window(vastbase_module): ) -def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): +def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(vastbase_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -114,7 +114,7 @@ def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): pool.putconn.assert_called_once_with(conn) -def test_create_and_add_texts(vastbase_module, monkeypatch): +def test_create_and_add_texts(vastbase_module, monkeypatch: pytest.MonkeyPatch): vector = vastbase_module.VastbaseVector.__new__(vastbase_module.VastbaseVector) vector.table_name = "embedding_collection_1" vector._create_collection = MagicMock() @@ -205,7 +205,7 @@ def test_search_by_vector_and_full_text(vastbase_module): assert full_docs[0].page_content == "full-text" -def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch): +def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -240,7 +240,7 @@ def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeyp vastbase_module.redis_client.set.assert_called() -def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch): +def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch: pytest.MonkeyPatch): factory = vastbase_module.VastbaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py index 544b8163be..6559ad97d2 100644 --- a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py +++ 
b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py @@ -79,7 +79,7 @@ def _build_fake_vikingdb_modules(): @pytest.fixture -def vikingdb_module(monkeypatch): +def vikingdb_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_vikingdb_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -117,7 +117,7 @@ def test_init_get_type_and_has_checks(vikingdb_module): assert vector._has_index() is False -def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch): +def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -253,7 +253,7 @@ def test_delete_drops_index_and_collection_when_present(vikingdb_module): vector._client.drop_collection.assert_not_called() -def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch): +def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch: pytest.MonkeyPatch): factory = vikingdb_module.VikingDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -293,7 +293,9 @@ def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, mo ("VIKINGDB_SCHEME", "VIKINGDB_SCHEME should not be None"), ], ) -def test_vikingdb_factory_raises_when_required_config_missing(vikingdb_module, monkeypatch, field, message): +def test_vikingdb_factory_raises_when_required_config_missing( + vikingdb_module, monkeypatch: pytest.MonkeyPatch, field, message +): factory = vikingdb_module.VikingDBVectorFactory() dataset = SimpleNamespace( id="dataset-1", index_struct_dict={"vector_store": {"class_prefix": "existing"}}, index_struct=None diff --git a/api/pyproject.toml b/api/pyproject.toml index 69add5c68d..6c30779f9d 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -45,7 +45,7 @@ dependencies = [ # Emerging: newer and fast-moving, use compatible pins 
"fastopenapi[flask]~=0.7.0", - "graphon~=0.2.2", + "graphon~=0.3.0", "httpx-sse~=0.4.0", "json-repair~=0.59.4", ] @@ -103,6 +103,7 @@ dify-trace-weave = { workspace = true } default-groups = ["storage", "tools", "vdb-all", "trace-all"] package = false override-dependencies = [ + "litellm>=1.83.7", "pyarrow>=18.0.0", ] diff --git a/api/services/credit_pool_service.py b/api/services/credit_pool_service.py index 2d210db121..1f419d7a5b 100644 --- a/api/services/credit_pool_service.py +++ b/api/services/credit_pool_service.py @@ -1,7 +1,7 @@ import logging -from sqlalchemy import select, update -from sqlalchemy.orm import sessionmaker +from sqlalchemy import select +from sqlalchemy.orm import Session, sessionmaker from configs import dify_config from core.errors.error import QuotaExceededError @@ -13,6 +13,18 @@ logger = logging.getLogger(__name__) class CreditPoolService: + @staticmethod + def _get_locked_pool(session: Session, tenant_id: str, pool_type: str) -> TenantCreditPool | None: + return session.scalar( + select(TenantCreditPool) + .where( + TenantCreditPool.tenant_id == tenant_id, + TenantCreditPool.pool_type == pool_type, + ) + .limit(1) + .with_for_update() + ) + @classmethod def create_default_pool(cls, tenant_id: str) -> TenantCreditPool: """create default credit pool for new tenant""" @@ -59,31 +71,57 @@ class CreditPoolService: credits_required: int, pool_type: str = "trial", ) -> int: - """check and deduct credits, returns actual credits deducted""" - - pool = cls.get_pool(tenant_id, pool_type) - if not pool: - raise QuotaExceededError("Credit pool not found") - - if pool.remaining_credits <= 0: - raise QuotaExceededError("No credits remaining") - - # deduct all remaining credits if less than required - actual_credits = min(credits_required, pool.remaining_credits) + """Deduct exactly the requested credits or raise without mutating the pool.""" + if credits_required <= 0: + return 0 try: - with sessionmaker(db.engine).begin() as session: - stmt = ( - 
update(TenantCreditPool) - .where( - TenantCreditPool.tenant_id == tenant_id, - TenantCreditPool.pool_type == pool_type, - ) - .values(quota_used=TenantCreditPool.quota_used + actual_credits) - ) - session.execute(stmt) + with sessionmaker(db.engine, expire_on_commit=False).begin() as session: + pool = cls._get_locked_pool(session=session, tenant_id=tenant_id, pool_type=pool_type) + if not pool: + raise QuotaExceededError("Credit pool not found") + + remaining_credits = pool.remaining_credits + if remaining_credits <= 0: + raise QuotaExceededError("No credits remaining") + if remaining_credits < credits_required: + raise QuotaExceededError("Insufficient credits remaining") + + pool.quota_used += credits_required + except QuotaExceededError: + raise except Exception: logger.exception("Failed to deduct credits for tenant %s", tenant_id) raise QuotaExceededError("Failed to deduct credits") - return actual_credits + return credits_required + + @classmethod + def deduct_credits_capped( + cls, + tenant_id: str, + credits_required: int, + pool_type: str = "trial", + ) -> int: + """Deduct up to the available balance and return the actual deducted credits.""" + if credits_required <= 0: + return 0 + + try: + with sessionmaker(db.engine, expire_on_commit=False).begin() as session: + pool = cls._get_locked_pool(session=session, tenant_id=tenant_id, pool_type=pool_type) + if not pool: + logger.warning("Credit pool not found, tenant_id=%s, pool_type=%s", tenant_id, pool_type) + return 0 + + deducted_credits = min(credits_required, pool.remaining_credits) + if deducted_credits <= 0: + return 0 + + pool.quota_used += deducted_credits + return deducted_credits + except QuotaExceededError: + raise + except Exception: + logger.exception("Failed to deduct capped credits for tenant %s", tenant_id) + raise QuotaExceededError("Failed to deduct credits") diff --git a/api/services/file_service.py b/api/services/file_service.py index f60afe2f19..b683a2f3d4 100644 --- 
a/api/services/file_service.py +++ b/api/services/file_service.py @@ -107,15 +107,14 @@ class FileService: hash=hashlib.sha3_256(content).hexdigest(), source_url=source_url, ) - # The `UploadFile` ID is generated within its constructor, so flushing to retrieve the ID is unnecessary. - # We can directly generate the `source_url` here before committing. - if not upload_file.source_url: - upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) with self._session_maker(expire_on_commit=False) as session: session.add(upload_file) session.commit() + if not upload_file.source_url: + upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) + return upload_file @staticmethod diff --git a/api/services/recommend_app/category_order.py b/api/services/recommend_app/category_order.py new file mode 100644 index 0000000000..be6b112aa4 --- /dev/null +++ b/api/services/recommend_app/category_order.py @@ -0,0 +1,49 @@ +"""Apply Redis-backed category ordering for DB-backed Explore apps.""" + +import json +import logging +from collections.abc import Collection +from typing import Any + +from extensions.ext_redis import redis_client + +logger = logging.getLogger(__name__) + +EXPLORE_APP_CATEGORY_ORDER_KEY_PREFIX = "explore:apps:category_order" + + +def _category_order_key(language: str) -> str: + return f"{EXPLORE_APP_CATEGORY_ORDER_KEY_PREFIX}:{language}" + + +def get_explore_app_category_order(language: str) -> list[str]: + try: + raw_categories = redis_client.get(_category_order_key(language)) + except Exception: + logger.exception("Failed to read explore app category order from Redis.") + return [] + + if not raw_categories: + return [] + + if isinstance(raw_categories, bytes): + raw_categories = raw_categories.decode("utf-8") + + try: + categories: Any = json.loads(raw_categories) + except (TypeError, json.JSONDecodeError): + logger.warning("Invalid explore app category order payload for language %s.", language) + 
return [] + + if not isinstance(categories, list): + return [] + + return [category for category in categories if isinstance(category, str)] + + +def order_categories(categories: Collection[str], language: str) -> list[str]: + configured_order = get_explore_app_category_order(language) + if configured_order: + return configured_order + + return sorted(categories) diff --git a/api/services/recommend_app/database/database_retrieval.py b/api/services/recommend_app/database/database_retrieval.py index 1df5fd13b6..ac870f0700 100644 --- a/api/services/recommend_app/database/database_retrieval.py +++ b/api/services/recommend_app/database/database_retrieval.py @@ -6,6 +6,7 @@ from constants.languages import languages from extensions.ext_database import db from models.model import App, RecommendedApp from services.app_dsl_service import AppDslService +from services.recommend_app.category_order import order_categories from services.recommend_app.recommend_app_base import RecommendAppRetrievalBase from services.recommend_app.recommend_app_type import RecommendAppType @@ -18,7 +19,7 @@ class RecommendedAppItemDict(TypedDict): copyright: Any privacy_policy: Any custom_disclaimer: str - category: str + categories: list[str] position: int is_listed: bool @@ -80,6 +81,7 @@ class DatabaseRecommendAppRetrieval(RecommendAppRetrievalBase): if not site: continue + app_categories = recommended_app.categories or [] recommended_app_result: RecommendedAppItemDict = { "id": recommended_app.id, "app": recommended_app.app, @@ -88,15 +90,18 @@ class DatabaseRecommendAppRetrieval(RecommendAppRetrievalBase): "copyright": site.copyright, "privacy_policy": site.privacy_policy, "custom_disclaimer": site.custom_disclaimer, - "category": recommended_app.category, + "categories": app_categories, "position": recommended_app.position, "is_listed": recommended_app.is_listed, } recommended_apps_result.append(recommended_app_result) - categories.add(recommended_app.category) + 
categories.update(app_categories) - return RecommendedAppsResultDict(recommended_apps=recommended_apps_result, categories=sorted(categories)) + return RecommendedAppsResultDict( + recommended_apps=recommended_apps_result, + categories=order_categories(categories, language), + ) @classmethod def fetch_recommended_app_detail_from_db(cls, app_id: str) -> RecommendedAppDetailDict | None: diff --git a/api/services/tools/builtin_tools_manage_service.py b/api/services/tools/builtin_tools_manage_service.py index b8242ab3a5..20de1f4058 100644 --- a/api/services/tools/builtin_tools_manage_service.py +++ b/api/services/tools/builtin_tools_manage_service.py @@ -408,7 +408,7 @@ class BuiltinToolManageService: return {"result": "success"} @staticmethod - def set_default_provider(tenant_id: str, user_id: str, provider: str, id: str): + def set_default_provider(tenant_id: str, provider: str, id: str): """ set default provider """ @@ -422,12 +422,11 @@ class BuiltinToolManageService: if target_provider is None: raise ValueError("provider not found") - # clear default provider + # clear default provider (tenant-scoped: only one default per provider per workspace) session.execute( update(BuiltinToolProvider) .where( BuiltinToolProvider.tenant_id == tenant_id, - BuiltinToolProvider.user_id == user_id, BuiltinToolProvider.provider == provider, BuiltinToolProvider.is_default.is_(True), ) diff --git a/api/services/variable_truncator.py b/api/services/variable_truncator.py index 1529c2b98f..5dd5f6873f 100644 --- a/api/services/variable_truncator.py +++ b/api/services/variable_truncator.py @@ -194,14 +194,15 @@ class VariableTruncator(BaseTruncator): result: _PartResult[Any] # Apply type-specific truncation with target size - if isinstance(segment, ArraySegment): - result = self._truncate_array(segment.value, target_size) - elif isinstance(segment, StringSegment): - result = self._truncate_string(segment.value, target_size) - elif isinstance(segment, ObjectSegment): - result = 
self._truncate_object(segment.value, target_size) - else: - raise AssertionError("this should be unreachable.") + match segment: + case ArraySegment(): + result = self._truncate_array(segment.value, target_size) + case StringSegment(): + result = self._truncate_string(segment.value, target_size) + case ObjectSegment(): + result = self._truncate_object(segment.value, target_size) + case _: + raise AssertionError("this should be unreachable.") return _PartResult( value=segment.model_copy(update={"value": result.value}), @@ -219,40 +220,41 @@ class VariableTruncator(BaseTruncator): return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1) if depth > _MAX_DEPTH: raise MaxDepthExceededError() - if isinstance(value, str): - # Ideally, the size of strings should be calculated based on their utf-8 encoded length. - # However, this adds complexity as we would need to compute encoded sizes consistently - # throughout the code. Therefore, we approximate the size using the string's length. 
- # Rough estimate: number of characters, plus 2 for quotes - return len(value) + 2 - elif isinstance(value, (int, float)): - return len(str(value)) - elif isinstance(value, bool): - return 4 if value else 5 # "true" or "false" - elif value is None: - return 4 # "null" - elif isinstance(value, list): - # Size = sum of elements + separators + brackets - total = 2 # "[]" - for i, item in enumerate(value): - if i > 0: - total += 1 # "," - total += VariableTruncator.calculate_json_size(item, depth=depth + 1) - return total - elif isinstance(value, dict): - # Size = sum of keys + values + separators + brackets - total = 2 # "{}" - for index, key in enumerate(value.keys()): - if index > 0: - total += 1 # "," - total += VariableTruncator.calculate_json_size(str(key), depth=depth + 1) # Key as string - total += 1 # ":" - total += VariableTruncator.calculate_json_size(value[key], depth=depth + 1) - return total - elif isinstance(value, File): - return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1) - else: - raise UnknownTypeError(f"got unknown type {type(value)}") + match value: + case str(): + # Ideally, the size of strings should be calculated based on their utf-8 encoded length. + # However, this adds complexity as we would need to compute encoded sizes consistently + # throughout the code. Therefore, we approximate the size using the string's length. 
+ # Rough estimate: number of characters, plus 2 for quotes + return len(value) + 2 + case bool(): + return 4 if value else 5 # "true" or "false" + case int() | float(): + return len(str(value)) + case None: + return 4 # "null" + case list(): + # Size = sum of elements + separators + brackets + total = 2 # "[]" + for i, item in enumerate(value): + if i > 0: + total += 1 # "," + total += VariableTruncator.calculate_json_size(item, depth=depth + 1) + return total + case dict(): + # Size = sum of keys + values + separators + brackets + total = 2 # "{}" + for index, key in enumerate(value.keys()): + if index > 0: + total += 1 # "," + total += VariableTruncator.calculate_json_size(str(key), depth=depth + 1) # Key as string + total += 1 # ":" + total += VariableTruncator.calculate_json_size(value[key], depth=depth + 1) + return total + case File(): + return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1) + case _: + raise UnknownTypeError(f"got unknown type {type(value)}") def _truncate_string(self, value: str, target_size: int) -> _PartResult[str]: if (size := self.calculate_json_size(value)) < target_size: @@ -419,22 +421,23 @@ class VariableTruncator(BaseTruncator): target_size: int, ) -> _PartResult[Any]: """Truncate a value within an object to fit within budget.""" - if isinstance(val, UpdatedVariable): - # TODO(Workflow): push UpdatedVariable normalization closer to its producer. 
- return self._truncate_object(val.model_dump(), target_size) - elif isinstance(val, str): - return self._truncate_string(val, target_size) - elif isinstance(val, list): - return self._truncate_array(val, target_size) - elif isinstance(val, dict): - return self._truncate_object(val, target_size) - elif isinstance(val, File): - # File objects should not be truncated, return as-is - return _PartResult(val, self.calculate_json_size(val), False) - elif val is None or isinstance(val, (bool, int, float)): - return _PartResult(val, self.calculate_json_size(val), False) - else: - raise AssertionError("this statement should be unreachable.") + match val: + case UpdatedVariable(): + # TODO(Workflow): push UpdatedVariable normalization closer to its producer. + return self._truncate_object(val.model_dump(), target_size) + case str(): + return self._truncate_string(val, target_size) + case list(): + return self._truncate_array(val, target_size) + case dict(): + return self._truncate_object(val, target_size) + case File(): + # File objects should not be truncated, return as-is + return _PartResult(val, self.calculate_json_size(val), False) + case None | bool() | int() | float(): + return _PartResult(val, self.calculate_json_size(val), False) + case _: + raise AssertionError("this statement should be unreachable.") class DummyVariableTruncator(BaseTruncator): diff --git a/api/services/workflow_draft_variable_service.py b/api/services/workflow_draft_variable_service.py index a55448e352..59db147576 100644 --- a/api/services/workflow_draft_variable_service.py +++ b/api/services/workflow_draft_variable_service.py @@ -157,8 +157,8 @@ class DraftVarLoader(VariableLoader): # This approach reduces loading time by querying external systems concurrently. 
with ThreadPoolExecutor(max_workers=10) as executor: offloaded_variables = executor.map(self._load_offloaded_variable, offloaded_draft_vars) - for selector, variable in offloaded_variables: - variable_by_selector[selector] = variable + for selector, offloaded_variable in offloaded_variables: + variable_by_selector[selector] = offloaded_variable return list(variable_by_selector.values()) diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index f97b85dc2b..b8c2ed5e6f 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -1251,7 +1251,7 @@ class WorkflowService: node_data = HumanInputNode.validate_node_data(adapt_human_input_node_data_for_graph(node_config["data"])) node = HumanInputNode( node_id=node_config["id"], - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, runtime=DifyHumanInputNodeRuntime(run_context), diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index 3b5e822b90..90131fe98d 100644 --- a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -13,7 +13,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole -from models.enums import ConversationFromSource +from models.enums import AppStatus, ConversationFromSource from models.model import AppMode from services.app_generate_service import AppGenerateService @@ -28,7 +28,7 @@ class TestChatMessageApiPermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL return app @pytest.fixture @@ -78,7 
+78,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -130,7 +130,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py index 309a0b015a..c4db0d5111 100644 --- a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py +++ b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py @@ -14,7 +14,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole -from models.enums import FeedbackFromSource, FeedbackRating +from models.enums import AppStatus, FeedbackFromSource, FeedbackRating from models.model import AppMode, MessageFeedback from services.feedback_service import FeedbackService @@ -29,7 +29,7 @@ class TestFeedbackExportApi: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.name = "Test App" return app @@ -135,7 +135,7 @@ class TestFeedbackExportApi: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -167,7 +167,13 @@ class TestFeedbackExportApi: mock_export_feedbacks.assert_called_once() def test_feedback_export_csv_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + 
mock_account, + sample_feedback_data, ): """Test feedback export in CSV format.""" @@ -202,7 +208,13 @@ class TestFeedbackExportApi: assert "text/csv" in response.content_type def test_feedback_export_json_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + mock_account, + sample_feedback_data, ): """Test feedback export in JSON format.""" @@ -246,7 +258,7 @@ class TestFeedbackExportApi: assert "application/json" in response.content_type def test_feedback_export_with_filters( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with various filters.""" @@ -287,7 +299,7 @@ class TestFeedbackExportApi: ) def test_feedback_export_invalid_date_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with invalid date format.""" @@ -312,7 +324,7 @@ class TestFeedbackExportApi: assert "Parameter validation error" in response_json["error"] def test_feedback_export_server_error( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with server error.""" diff --git a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py index 04945e57a0..ab08c7a6d8 100644 --- a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py +++ 
b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py @@ -11,6 +11,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole +from models.enums import AppStatus from models.model import AppMode from services.app_model_config_service import AppModelConfigService @@ -25,7 +26,7 @@ class TestModelConfigResourcePermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.app_model_config_id = str(uuid.uuid4()) return app @@ -73,7 +74,7 @@ class TestModelConfigResourcePermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py index a876b0c4aa..7d0b575262 100644 --- a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py +++ b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py @@ -1,5 +1,7 @@ from collections.abc import Generator +from pytest_mock import MockerFixture + from core.datasource.datasource_manager import DatasourceManager from core.datasource.entities.datasource_entities import DatasourceMessage from graphon.node_events import StreamCompletedEvent @@ -19,7 +21,7 @@ def _gen_var_stream() -> Generator[DatasourceMessage, None, None]: ) -def test_stream_node_events_accumulates_variables(mocker): +def test_stream_node_events_accumulates_variables(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_var_stream()) events = list( DatasourceManager.stream_node_events( diff --git 
a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py index 2392084c36..b9f09ccadd 100644 --- a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py +++ b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GP: call_depth = 0 -def test_node_integration_minimal_stream(mocker): +def test_node_integration_minimal_stream(mocker: MockerFixture): sys_d = { "sys": { "datasource_type": "online_document", @@ -71,7 +73,7 @@ def test_node_integration_minimal_stream(mocker): node = DatasourceNode( node_id="n", - config=DatasourceNodeData( + data=DatasourceNodeData( type="datasource", version="1", title="Datasource", diff --git a/api/tests/integration_tests/workflow/nodes/__mock/model.py b/api/tests/integration_tests/workflow/nodes/__mock/model.py index a9a2617bae..a77fe5970a 100644 --- a/api/tests/integration_tests/workflow/nodes/__mock/model.py +++ b/api/tests/integration_tests/workflow/nodes/__mock/model.py @@ -4,7 +4,7 @@ from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEnti from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration from core.model_manager import ModelInstance -from core.plugin.impl.model_runtime_factory import create_plugin_model_provider_factory +from core.plugin.impl.model_runtime_factory import create_plugin_model_assembly from 
graphon.model_runtime.entities.model_entities import ModelType from models.provider import ProviderType @@ -15,8 +15,9 @@ def get_mocked_fetch_model_config( mode: str, credentials: dict, ): - model_provider_factory = create_plugin_model_provider_factory(tenant_id="9d2074fc-6f86-45a9-b09d-6ecc63b9056b") - model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM) + model_assembly = create_plugin_model_assembly(tenant_id="9d2074fc-6f86-45a9-b09d-6ecc63b9056b") + model_provider_factory = model_assembly.model_provider_factory + model_type_instance = model_assembly.create_model_type_instance(provider=provider, model_type=ModelType.LLM) provider_model_bundle = ProviderModelBundle( configuration=ProviderConfiguration( tenant_id="1", diff --git a/api/tests/integration_tests/workflow/nodes/test_code.py b/api/tests/integration_tests/workflow/nodes/test_code.py index aaa6092993..9345113aa3 100644 --- a/api/tests/integration_tests/workflow/nodes/test_code.py +++ b/api/tests/integration_tests/workflow/nodes/test_code.py @@ -45,7 +45,7 @@ def init_code_node(code_config: dict): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -66,7 +66,7 @@ def init_code_node(code_config: dict): node = CodeNode( node_id=str(uuid.uuid4()), - config=CodeNodeData.model_validate(code_config["data"]), + data=CodeNodeData.model_validate(code_config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, code_executor=node_factory._code_executor, diff --git a/api/tests/integration_tests/workflow/nodes/test_http.py b/api/tests/integration_tests/workflow/nodes/test_http.py index b9f7b9575b..7cd7f50b77 100644 --- a/api/tests/integration_tests/workflow/nodes/test_http.py +++ b/api/tests/integration_tests/workflow/nodes/test_http.py @@ -55,7 +55,7 @@ def init_http_node(config: 
dict): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -76,7 +76,7 @@ def init_http_node(config: dict): node = HttpRequestNode( node_id=str(uuid.uuid4()), - config=HttpRequestNodeData.model_validate(config["data"]), + data=HttpRequestNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, http_request_config=HTTP_REQUEST_CONFIG, @@ -204,7 +204,7 @@ def test_custom_auth_with_empty_api_key_raises_error(setup_http_mock): from graphon.runtime import VariablePool # Create variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="test", files=[]), user_inputs={}, environment_variables=[], @@ -702,7 +702,7 @@ def test_nested_object_variable_selector(setup_http_mock): ) # Create independent variable pool for this test only - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -724,7 +724,7 @@ def test_nested_object_variable_selector(setup_http_mock): node = HttpRequestNode( node_id=str(uuid.uuid4()), - config=HttpRequestNodeData.model_validate(graph_config["nodes"][1]["data"]), + data=HttpRequestNodeData.model_validate(graph_config["nodes"][1]["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, http_request_config=HTTP_REQUEST_CONFIG, diff --git a/api/tests/integration_tests/workflow/nodes/test_llm.py b/api/tests/integration_tests/workflow/nodes/test_llm.py index 3eead70163..92f3a1926c 100644 --- a/api/tests/integration_tests/workflow/nodes/test_llm.py +++ b/api/tests/integration_tests/workflow/nodes/test_llm.py @@ -53,7 +53,7 @@ def init_llm_node(config: dict) -> LLMNode: ) # construct variable pool - 
variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="aaa", app_id=app_id, @@ -77,7 +77,7 @@ def init_llm_node(config: dict) -> LLMNode: node = LLMNode( node_id=str(uuid.uuid4()), - config=LLMNodeData.model_validate(config["data"]), + data=LLMNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, credentials_provider=MagicMock(spec=CredentialsProvider), diff --git a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py index f2eabb86c3..f11188323a 100644 --- a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py +++ b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py @@ -56,7 +56,7 @@ def init_parameter_extractor_node(config: dict, memory=None): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="aaa", files=[], query="what's the weather in SF", conversation_id="abababa" ), @@ -71,7 +71,7 @@ def init_parameter_extractor_node(config: dict, memory=None): node = ParameterExtractorNode( node_id=str(uuid.uuid4()), - config=ParameterExtractorNodeData.model_validate(config["data"]), + data=ParameterExtractorNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, credentials_provider=MagicMock(spec=CredentialsProvider), diff --git a/api/tests/integration_tests/workflow/nodes/test_template_transform.py b/api/tests/integration_tests/workflow/nodes/test_template_transform.py index e2e0723fb8..80489e6809 100644 --- a/api/tests/integration_tests/workflow/nodes/test_template_transform.py +++ b/api/tests/integration_tests/workflow/nodes/test_template_transform.py @@ -66,7 +66,7 @@ def test_execute_template_transform(): ) # construct variable pool - variable_pool = 
VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -88,7 +88,7 @@ def test_execute_template_transform(): node = TemplateTransformNode( node_id=str(uuid.uuid4()), - config=TemplateTransformNodeData.model_validate(config["data"]), + data=TemplateTransformNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, jinja2_template_renderer=_SimpleJinja2Renderer(), diff --git a/api/tests/integration_tests/workflow/nodes/test_tool.py b/api/tests/integration_tests/workflow/nodes/test_tool.py index a8e9422c1e..78c12e7ea5 100644 --- a/api/tests/integration_tests/workflow/nodes/test_tool.py +++ b/api/tests/integration_tests/workflow/nodes/test_tool.py @@ -2,6 +2,8 @@ import time import uuid from unittest.mock import MagicMock, patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.tools.utils.configuration import ToolParameterConfigurationManager from core.workflow.node_factory import DifyNodeFactory @@ -41,7 +43,7 @@ def init_tool_node(config: dict): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -62,7 +64,7 @@ def init_tool_node(config: dict): node = ToolNode( node_id=str(uuid.uuid4()), - config=ToolNodeData.model_validate(config["data"]), + data=ToolNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, tool_file_manager_factory=tool_file_manager_factory, @@ -71,7 +73,7 @@ def init_tool_node(config: dict): return node -def test_tool_variable_invoke(monkeypatch): +def test_tool_variable_invoke(monkeypatch: pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", @@ -106,7 +108,7 @@ def 
test_tool_variable_invoke(monkeypatch): assert item.node_run_result.outputs.get("text") is not None -def test_tool_mixed_invoke(monkeypatch): +def test_tool_mixed_invoke(monkeypatch: pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", diff --git a/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py b/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py index bd13527e14..66b3392a4b 100644 --- a/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py +++ b/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py @@ -210,7 +210,9 @@ class TestPauseStatePersistenceLayerTestContainers: execution_id = workflow_run_id or getattr(self, "test_workflow_run_id", None) or str(uuid.uuid4()) # Create variable pool - variable_pool = VariablePool(system_variables=build_system_variables(workflow_execution_id=execution_id)) + variable_pool = VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id=execution_id) + ) if variables: for (node_id, var_key), value in variables.items(): variable_pool.add([node_id, var_key], value) diff --git a/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py b/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py index 5aed230cd4..ad82b8fe2a 100644 --- a/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py +++ b/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py @@ -66,7 +66,7 @@ def _mock_form_repository_with_submission(action_id: str) -> HumanInputFormRepos def _build_runtime_state(workflow_execution_id: str, app_id: str, workflow_id: str, user_id: str) -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( 
system_variables=build_system_variables( workflow_execution_id=workflow_execution_id, app_id=app_id, @@ -102,7 +102,7 @@ def _build_graph( start_data = StartNodeData(title="start", variables=[]) start_node = StartNode( node_id="start", - config=start_data, + data=start_data, graph_init_params=params, graph_runtime_state=runtime_state, ) @@ -117,7 +117,7 @@ def _build_graph( ) human_node = HumanInputNode( node_id="human", - config=human_data, + data=human_data, graph_init_params=params, graph_runtime_state=runtime_state, form_repository=form_repository, @@ -131,7 +131,7 @@ def _build_graph( ) end_node = EndNode( node_id="end", - config=end_data, + data=end_data, graph_init_params=params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py index 178fc2e4fb..390795486b 100644 --- a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py +++ b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py @@ -11,7 +11,7 @@ from libs import helper as helper_module @pytest.mark.usefixtures("flask_app_with_containers") -def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch): +def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch: pytest.MonkeyPatch): prefix = f"test_rate_limit:{uuid.uuid4().hex}" limiter = helper_module.RateLimiter(prefix=prefix, max_attempts=2, time_window=60) key = limiter._get_key("203.0.113.10") diff --git a/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py b/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py index 724dd19f92..11e864176a 100644 --- a/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py +++ 
b/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py @@ -47,6 +47,7 @@ def _create_recommended_app( *, app_id: str, category: str = "chat", + categories: list[str] | None = None, language: str = "en-US", is_listed: bool = True, position: int = 1, @@ -57,6 +58,7 @@ def _create_recommended_app( copyright="copy", privacy_policy="pp", category=category, + categories=[category] if categories is None else categories, language=language, is_listed=is_listed, position=position, @@ -113,6 +115,53 @@ class TestFetchRecommendedAppsFromDb: assert "assistant" in result["categories"] assert "writing" in result["categories"] + def test_returns_multiple_categories_for_one_app( + self, flask_app_with_containers, db_session_with_containers: Session + ): + tenant_id = str(uuid4()) + created_app = _create_app(db_session_with_containers, tenant_id=tenant_id) + _create_site(db_session_with_containers, app_id=created_app.id) + _create_recommended_app( + db_session_with_containers, + app_id=created_app.id, + category="writing", + categories=["writing", "assistant"], + ) + + db_session_with_containers.expire_all() + + result = DatabaseRecommendAppRetrieval.fetch_recommended_apps_from_db("en-US") + + recommended_app = next(item for item in result["recommended_apps"] if item["app_id"] == created_app.id) + assert recommended_app["categories"] == ["writing", "assistant"] + assert "writing" in result["categories"] + assert "assistant" in result["categories"] + + def test_ignores_legacy_category_when_categories_are_empty( + self, + flask_app_with_containers, + db_session_with_containers: Session, + ): + legacy_category = f"legacy-empty-{uuid4()}" + tenant_id = str(uuid4()) + created_app = _create_app(db_session_with_containers, tenant_id=tenant_id) + _create_site(db_session_with_containers, app_id=created_app.id) + _create_recommended_app( + db_session_with_containers, + app_id=created_app.id, + category=legacy_category, + categories=[], + ) + + 
db_session_with_containers.expire_all() + + result = DatabaseRecommendAppRetrieval.fetch_recommended_apps_from_db("en-US") + + recommended_app = next(item for item in result["recommended_apps"] if item["app_id"] == created_app.id) + assert "category" not in recommended_app + assert recommended_app["categories"] == [] + assert legacy_category not in result["categories"] + def test_falls_back_to_default_language_when_empty( self, flask_app_with_containers, db_session_with_containers: Session ): diff --git a/api/tests/test_containers_integration_tests/services/test_agent_service.py b/api/tests/test_containers_integration_tests/services/test_agent_service.py index 00a2f9a59f..cbd939c7a4 100644 --- a/api/tests/test_containers_integration_tests/services/test_agent_service.py +++ b/api/tests/test_containers_integration_tests/services/test_agent_service.py @@ -6,7 +6,7 @@ from faker import Faker from sqlalchemy.orm import Session from core.plugin.impl.exc import PluginDaemonClientSideError -from models import Account +from models import Account, CreatorUserRole from models.enums import ConversationFromSource, MessageFileBelongsTo from models.model import AppModelConfig, Conversation, EndUser, Message, MessageAgentThought from services.account_service import AccountService, TenantService @@ -246,7 +246,7 @@ class TestAgentService: tool_input=json.dumps({"test_tool": {"input": "test_input"}}), observation=json.dumps({"test_tool": {"output": "test_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought1) @@ -294,7 +294,7 @@ class TestAgentService: agent_thoughts = self._create_test_agent_thoughts(db_session_with_containers, message) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result structure assert result 
is not None @@ -370,7 +370,7 @@ class TestAgentService: # Execute the method under test with non-existent message with pytest.raises(ValueError, match="Message not found"): - AgentService.get_agent_logs(app, str(conversation.id), fake.uuid4()) + AgentService.get_agent_logs(app, conversation.id, fake.uuid4()) def test_get_agent_logs_with_end_user( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -451,7 +451,7 @@ class TestAgentService: db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -523,7 +523,7 @@ class TestAgentService: db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -561,14 +561,14 @@ class TestAgentService: tool_input=json.dumps({"error_tool": {"input": "test_input"}}), observation=json.dumps({"error_tool": {"output": "error_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_error) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -592,7 +592,7 @@ class TestAgentService: conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, 
conversation.id, message.id) # Verify the result assert result is not None @@ -654,7 +654,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="App model config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def test_get_agent_logs_agent_config_not_found( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -673,7 +673,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="Agent config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def test_list_agent_providers_success( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -687,7 +687,7 @@ class TestAgentService: app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) # Execute the method under test - result = AgentService.list_agent_providers(str(account.id), str(app.tenant_id)) + result = AgentService.list_agent_providers(account.id, app.tenant_id) # Verify the result assert result is not None @@ -696,7 +696,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(str(app.tenant_id)) + mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(app.tenant_id) def test_get_agent_provider_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ @@ -710,7 +710,7 @@ class TestAgentService: provider_name = "test_provider" # Execute the method under test - result = AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + result = 
AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) # Verify the result assert result is not None @@ -718,7 +718,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(str(app.tenant_id), provider_name) + mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(app.tenant_id, provider_name) def test_get_agent_provider_plugin_error( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -740,7 +740,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match=error_message): - AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) def test_get_agent_logs_with_complex_tool_data( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -796,14 +796,14 @@ class TestAgentService: {"tool1": {"output1": "result1"}, "tool2": {"output2": "result2"}, "tool3": {"output3": "result3"}} ), tokens=100, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(complex_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -891,14 +891,14 @@ class TestAgentService: observation=json.dumps({"file_tool": {"output": "test_output"}}), message_files=json.dumps(["file1", "file2"]), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_files) 
db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -926,7 +926,7 @@ class TestAgentService: mock_external_service_dependencies["current_user"].timezone = "Asia/Shanghai" # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -960,14 +960,14 @@ class TestAgentService: tool_input="", # Empty input observation="", # Empty observation tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(empty_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -1001,14 +1001,14 @@ class TestAgentService: tool_input="invalid json", # Malformed JSON observation="invalid json", # Malformed JSON tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(malformed_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result - should handle malformed JSON gracefully assert result is not None diff --git a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py index 7c5d2390ba..a5ec06dc13 100644 
--- a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py @@ -198,7 +198,7 @@ class TestAppDslService: def test_check_version_compatibility_newer_version_returns_pending(self): assert _check_version_compatibility("99.0.0") == ImportStatus.PENDING - def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch): + def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(app_dsl_service, "CURRENT_DSL_VERSION", "1.0.0") assert _check_version_compatibility("0.9.9") == ImportStatus.PENDING @@ -272,7 +272,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Missing app data" in result.error - def test_import_app_yaml_error_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): def bad_safe_load(_content: str): raise yaml.YAMLError("bad") @@ -287,7 +289,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert result.error.startswith("Invalid YAML format:") - def test_import_app_unexpected_error_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_unexpected_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( AppDslService, "_create_or_update_app", @@ -305,7 +309,9 @@ class TestAppDslService: # ── Import: YAML URL ────────────────────────────────────────────── - def test_import_app_yaml_url_fetch_error_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_fetch_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( app_dsl_service.ssrf_proxy, "get", @@ 
-321,7 +327,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Error fetching YAML from URL: boom" in result.error - def test_import_app_yaml_url_empty_content_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_empty_content_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() response.content = b"" response.raise_for_status.return_value = None @@ -336,7 +344,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Empty content" in result.error - def test_import_app_yaml_url_file_too_large_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_file_too_large_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() response.content = b"x" * (DSL_MAX_SIZE + 1) response.raise_for_status.return_value = None @@ -379,7 +389,9 @@ class TestAppDslService: assert result.imported_dsl_version == "99.0.0" assert requested_urls == [yaml_url] - def test_import_app_yaml_url_github_blob_rewrites_to_raw(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_github_blob_rewrites_to_raw( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): yaml_url = "https://github.com/acme/repo/blob/main/app.yml" raw_url = "https://raw.githubusercontent.com/acme/repo/main/app.yml" yaml_bytes = _pending_yaml_content() @@ -491,7 +503,7 @@ class TestAppDslService: @pytest.mark.parametrize("has_workflow", [True, False]) def test_import_app_legacy_versions_extract_dependencies( - self, db_session_with_containers: Session, monkeypatch, has_workflow: bool + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch, has_workflow: bool ): monkeypatch.setattr( AppDslService, @@ -554,7 +566,9 @@ class TestAppDslService: assert result.status == 
ImportStatus.FAILED assert "expired" in result.error - def test_confirm_import_success_deletes_redis_key(self, db_session_with_containers: Session, monkeypatch): + def test_confirm_import_success_deletes_redis_key( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): import_id = str(uuid4()) redis_key = f"{IMPORT_INFO_REDIS_KEY_PREFIX}{import_id}" @@ -614,7 +628,9 @@ class TestAppDslService: result = service.check_dependencies(app_model=app_model) assert result.leaked_dependencies == [] - def test_check_dependencies_calls_analysis_service(self, db_session_with_containers: Session, monkeypatch): + def test_check_dependencies_calls_analysis_service( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): app_id = str(uuid4()) pending = CheckDependenciesPendingData(dependencies=[], app_id=app_id) redis_client.setex( @@ -665,7 +681,9 @@ class TestAppDslService: with pytest.raises(ValueError, match="loss app mode"): service._create_or_update_app(app=None, data={"app": {}}, account=_account_mock()) - def test_create_or_update_app_existing_app_updates_fields(self, db_session_with_containers: Session, monkeypatch): + def test_create_or_update_app_existing_app_updates_fields( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): fixed_now = object() monkeypatch.setattr(app_dsl_service, "naive_utc_now", lambda: fixed_now) @@ -778,8 +796,8 @@ class TestAppDslService: service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Missing model_config"): service._create_or_update_app( - app=_app_stub(mode=AppMode.CHAT.value), - data={"app": {"mode": AppMode.CHAT.value}}, + app=_app_stub(mode=AppMode.CHAT), + data={"app": {"mode": AppMode.CHAT}}, account=_account_mock(), ) @@ -794,7 +812,7 @@ class TestAppDslService: service._create_or_update_app( app=app, data={ - "app": {"mode": AppMode.CHAT.value}, + "app": {"mode": AppMode.CHAT}, "model_config": {"model": 
{"provider": "openai"}}, }, account=account, @@ -807,14 +825,14 @@ class TestAppDslService: service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Invalid app mode"): service._create_or_update_app( - app=_app_stub(mode=AppMode.RAG_PIPELINE.value), - data={"app": {"mode": AppMode.RAG_PIPELINE.value}}, + app=_app_stub(mode=AppMode.RAG_PIPELINE), + data={"app": {"mode": AppMode.RAG_PIPELINE}}, account=_account_mock(), ) # ── Export ───────────────────────────────────────────────────────── - def test_export_dsl_delegates_by_mode(self, monkeypatch): + def test_export_dsl_delegates_by_mode(self, monkeypatch: pytest.MonkeyPatch): workflow_calls: list[bool] = [] model_calls: list[bool] = [] monkeypatch.setattr( @@ -836,14 +854,14 @@ class TestAppDslService: assert workflow_calls == [True] chat_app = _app_stub( - mode=AppMode.CHAT.value, + mode=AppMode.CHAT, icon_type="emoji", app_model_config=SimpleNamespace(to_dict=lambda: {"agent_mode": {"tools": []}}), ) AppDslService.export_dsl(chat_app) assert model_calls == [True] - def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch): + def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, "_append_workflow_export_data", @@ -1011,7 +1029,7 @@ class TestAppDslService: # ── Workflow Export Data ─────────────────────────────────────────── - def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch): + def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch: pytest.MonkeyPatch): workflow_dict = { "graph": { "nodes": [ @@ -1111,7 +1129,7 @@ class TestAppDslService: assert nodes[5]["data"]["subscription_id"] == "" assert export_data["dependencies"] == [{"tenant": _DEFAULT_TENANT_ID, "dep": "dep-1"}] - def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch): + def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch: 
pytest.MonkeyPatch): workflow_service = MagicMock() workflow_service.get_draft_workflow.return_value = None monkeypatch.setattr(app_dsl_service, "WorkflowService", lambda: workflow_service) @@ -1126,7 +1144,7 @@ class TestAppDslService: # ── Model Config Export Data ────────────────────────────────────── - def test_append_model_config_export_data_filters_credential_id(self, monkeypatch): + def test_append_model_config_export_data_filters_credential_id(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, "_extract_dependencies_from_model_config", @@ -1160,7 +1178,7 @@ class TestAppDslService: # ── Dependency Extraction ───────────────────────────────────────── - def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, monkeypatch): + def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_tool_dependency", @@ -1230,7 +1248,7 @@ class TestAppDslService: "model:m4", ] - def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.ToolNodeData, "model_validate", @@ -1241,7 +1259,7 @@ class TestAppDslService: ) assert deps == [] - def test_extract_dependencies_from_model_config_parses_providers(self, monkeypatch): + def test_extract_dependencies_from_model_config_parses_providers(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1264,7 +1282,7 @@ class TestAppDslService: ) assert deps == ["model:p1", "model:p2", "tool:t1"] - def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): 
monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1278,7 +1296,7 @@ class TestAppDslService: def test_get_leaked_dependencies_empty_returns_empty(self): assert AppDslService.get_leaked_dependencies(_DEFAULT_TENANT_ID, []) == [] - def test_get_leaked_dependencies_delegates(self, monkeypatch): + def test_get_leaked_dependencies_delegates(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "get_leaked_dependencies", @@ -1289,7 +1307,7 @@ class TestAppDslService: # ── Encryption/Decryption ───────────────────────────────────────── - def test_encrypt_decrypt_dataset_id_respects_config(self, monkeypatch): + def test_encrypt_decrypt_dataset_id_respects_config(self, monkeypatch: pytest.MonkeyPatch): tenant_id = _DEFAULT_TENANT_ID dataset_uuid = "00000000-0000-0000-0000-000000000000" @@ -1314,7 +1332,7 @@ class TestAppDslService: value = "00000000-0000-0000-0000-000000000000" assert AppDslService.decrypt_dataset_id(encrypted_data=value, tenant_id=_DEFAULT_TENANT_ID) == value - def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch): + def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", @@ -1322,7 +1340,7 @@ class TestAppDslService: ) assert AppDslService.decrypt_dataset_id(encrypted_data="not-base64", tenant_id=_DEFAULT_TENANT_ID) is None - def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch): + def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", diff --git a/api/tests/test_containers_integration_tests/services/test_app_service.py b/api/tests/test_containers_integration_tests/services/test_app_service.py index b695ae9fd9..837b63d1ea 100644 --- 
a/api/tests/test_containers_integration_tests/services/test_app_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_service.py @@ -6,6 +6,7 @@ from sqlalchemy.orm import Session from constants.model_template import default_app_templates from models import Account +from models.enums import AppStatus, CustomizeTokenStrategy from models.model import App, IconType, Site from services.account_service import AccountService, TenantService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -1079,9 +1080,9 @@ class TestAppService: site.app_id = app.id site.code = fake.postalcode() site.title = fake.company() - site.status = "normal" + site.status = AppStatus.NORMAL site.default_language = "en-US" - site.customize_token_strategy = "uuid" + site.customize_token_strategy = CustomizeTokenStrategy.UUID db_session_with_containers.add(site) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service.py b/api/tests/test_containers_integration_tests/services/test_conversation_service.py index 8aa10129c1..5f3914eb19 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service.py @@ -10,6 +10,7 @@ from sqlalchemy import select from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.enums import ConversationFromSource from models.model import App, Conversation, EndUser, Message, MessageAnnotation @@ -22,7 +23,7 @@ from services.message_service import MessageService class ConversationServiceIntegrationTestDataFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant 
{uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -41,7 +42,7 @@ class ConversationServiceIntegrationTestDataFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) @@ -155,7 +156,7 @@ class ConversationServiceIntegrationTestDataFactory: total_price=Decimal(0), currency="USD", status="normal", - invoke_from=InvokeFrom.WEB_APP.value, + invoke_from=InvokeFrom.WEB_APP, from_source=ConversationFromSource.API if isinstance(user, EndUser) else ConversationFromSource.CONSOLE, from_end_user_id=user.id if isinstance(user, EndUser) else None, from_account_id=user.id if isinstance(user, Account) else None, diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py index 6c292dbc4b..853630ad65 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py @@ -25,7 +25,7 @@ from services.errors.conversation import ( class ConversationServiceVariableIntegrationFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() diff --git a/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py b/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py index 09ba041244..07dc3a4e9e 100644 --- a/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py +++ b/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py @@ -90,16 +90,34 @@ class 
TestCreditPoolService: pool = CreditPoolService.get_pool(tenant_id=tenant_id) assert pool.quota_used == credits_required - def test_check_and_deduct_credits_caps_at_remaining(self, db_session_with_containers: Session): + def test_check_and_deduct_credits_raises_without_deducting_when_insufficient( + self, db_session_with_containers: Session + ): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) remaining = 5 pool.quota_used = pool.quota_limit - remaining + quota_used = pool.quota_used db_session_with_containers.commit() - result = CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=200) + with pytest.raises(QuotaExceededError, match="Insufficient credits remaining"): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=200) + + db_session_with_containers.expire_all() + updated_pool = CreditPoolService.get_pool(tenant_id=tenant_id) + assert updated_pool.quota_used == quota_used + + def test_deduct_credits_capped_depletes_available_balance(self, db_session_with_containers: Session): + tenant_id = self._create_tenant_id() + pool = CreditPoolService.create_default_pool(tenant_id) + remaining = 5 + pool.quota_used = pool.quota_limit - remaining + quota_limit = pool.quota_limit + db_session_with_containers.commit() + + result = CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=200) assert result == remaining db_session_with_containers.expire_all() updated_pool = CreditPoolService.get_pool(tenant_id=tenant_id) - assert updated_pool.quota_used == pool.quota_limit + assert updated_pool.quota_used == quota_limit diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py index 2bec703f0c..0c089e506b 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py +++ 
b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py @@ -6,6 +6,7 @@ from unittest.mock import create_autospec, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from werkzeug.exceptions import Forbidden, NotFound from core.rag.index_processor.constant.index_type import IndexStructureType @@ -119,13 +120,13 @@ def current_user_mock(): yield current_user -def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers): +def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_document(dataset.id, None) is None -def test_get_document_queries_by_dataset_and_document_id(db_session_with_containers): +def test_get_document_queries_by_dataset_and_document_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset) @@ -135,7 +136,7 @@ def test_get_document_queries_by_dataset_and_document_id(db_session_with_contain assert result.id == document.id -def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers): +def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) result = DocumentService.get_documents_by_ids(dataset.id, []) @@ -143,7 +144,7 @@ def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_cont assert result == [] -def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers): +def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) 
doc_a = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, name="a.txt") doc_b = DocumentServiceIntegrationFactory.create_document( @@ -158,13 +159,13 @@ def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers assert {document.id for document in result} == {doc_a.id, doc_b.id} -def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers): +def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.update_documents_need_summary(dataset.id, []) == 0 -def test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers): +def test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) paragraph_doc = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -195,7 +196,7 @@ def test_update_documents_need_summary_updates_matching_non_qa_documents(db_sess assert refreshed_qa.need_summary is True -def test_get_document_download_url_uses_signed_url_helper(db_session_with_containers): +def test_get_document_download_url_uses_signed_url_helper(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -215,7 +216,7 @@ def test_get_document_download_url_uses_signed_url_helper(db_session_with_contai get_url.assert_called_once_with(upload_file_id=upload_file.id, as_attachment=True) -def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers): +def 
test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -232,7 +233,9 @@ def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type ) -def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -248,7 +251,7 @@ def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file ) -def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -265,7 +268,9 @@ def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_sessio assert result == "99" -def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -278,7 +283,7 @@ def test_get_upload_file_for_upload_file_document_raises_when_file_service_retur 
DocumentService._get_upload_file_for_upload_file_document(document) -def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -296,7 +301,9 @@ def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session assert result.id == upload_file.id -def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with pytest.raises(NotFound, match="Document not found"): @@ -307,7 +314,9 @@ def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_doc ) -def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -329,7 +338,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_a ) -def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = 
DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -345,7 +356,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload ) -def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file_a = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -395,7 +408,7 @@ def test_prepare_document_batch_download_zip_raises_not_found_for_missing_datase def test_prepare_document_batch_download_zip_translates_permission_error_to_forbidden( - db_session_with_containers, + db_session_with_containers: Session, current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -418,7 +431,7 @@ def test_prepare_document_batch_download_zip_translates_permission_error_to_forb def test_prepare_document_batch_download_zip_returns_upload_files_in_requested_order( - db_session_with_containers, + db_session_with_containers: Session, current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -461,7 +474,7 @@ def test_prepare_document_batch_download_zip_returns_upload_files_in_requested_o assert download_name.endswith(".zip") -def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers): +def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) enabled_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -480,7 +493,9 @@ def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_co assert [document.id for document in result] == [enabled_document.id] -def 
test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents(db_session_with_containers): +def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) available_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -501,7 +516,7 @@ def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchive assert [document.id for document in result] == [available_document.id] -def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers): +def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) error_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -526,7 +541,7 @@ def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db assert {document.id for document in result} == {error_document.id, paused_document.id} -def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers): +def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) batch = f"batch-{uuid4()}" matching_document = DocumentServiceIntegrationFactory.create_document( @@ -549,7 +564,7 @@ def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_cont assert [document.id for document in result] == [matching_document.id] -def test_get_document_file_detail_returns_upload_file(db_session_with_containers): +def test_get_document_file_detail_returns_upload_file(db_session_with_containers: Session): dataset = 
DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -563,7 +578,7 @@ def test_get_document_file_detail_returns_upload_file(db_session_with_containers assert result.id == upload_file.id -def test_delete_document_emits_signal_and_commits(db_session_with_containers): +def test_delete_document_emits_signal_and_commits(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -588,7 +603,7 @@ def test_delete_document_emits_signal_and_commits(db_session_with_containers): ) -def test_delete_documents_ignores_empty_input(db_session_with_containers): +def test_delete_documents_ignores_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with patch("services.dataset_service.batch_clean_document_task.delay") as delay: @@ -597,7 +612,7 @@ def test_delete_documents_ignores_empty_input(db_session_with_containers): delay.assert_not_called() -def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers): +def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) dataset.chunk_structure = IndexStructureType.PARAGRAPH_INDEX db_session_with_containers.commit() @@ -637,14 +652,14 @@ def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_wi assert set(args[3]) == {upload_file_a.id, upload_file_b.id} -def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers): +def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers: Session): dataset = 
DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, position=3) assert DocumentService.get_documents_position(dataset.id) == 4 -def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers): +def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_documents_position(dataset.id) == 1 diff --git a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py index c0047df810..383a5f6374 100644 --- a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py +++ b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py @@ -2,6 +2,7 @@ import datetime from uuid import uuid4 from sqlalchemy import select +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType from models.dataset import Dataset, Document @@ -58,7 +59,7 @@ def _create_document( return document -def test_build_display_status_filters_available(db_session_with_containers): +def test_build_display_status_filters_available(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) available_doc = _create_document( db_session_with_containers, @@ -97,7 +98,7 @@ def test_build_display_status_filters_available(db_session_with_containers): assert [row.id for row in rows] == [available_doc.id] -def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers): +def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers: Session): dataset = 
_create_dataset(db_session_with_containers) waiting_doc = _create_document( db_session_with_containers, @@ -121,7 +122,7 @@ def test_apply_display_status_filter_applies_when_status_present(db_session_with assert [row.id for row in rows] == [waiting_doc.id] -def test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers): +def test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) doc1 = _create_document( db_session_with_containers, diff --git a/api/tests/test_containers_integration_tests/services/test_end_user_service.py b/api/tests/test_containers_integration_tests/services/test_end_user_service.py index 074d448aab..3f611d92f7 100644 --- a/api/tests/test_containers_integration_tests/services/test_end_user_service.py +++ b/api/tests/test_containers_integration_tests/services/test_end_user_service.py @@ -7,6 +7,7 @@ import pytest from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.model import App, DefaultEndUserSessionID, EndUser from services.end_user_service import EndUserService @@ -16,7 +17,7 @@ class TestEndUserServiceFactory: """Factory class for creating test data and mock objects for end user service tests.""" @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -35,7 +36,7 @@ class TestEndUserServiceFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) diff --git a/api/tests/test_containers_integration_tests/services/test_feature_service.py 
b/api/tests/test_containers_integration_tests/services/test_feature_service.py index f78aeaf984..a678e37b41 100644 --- a/api/tests/test_containers_integration_tests/services/test_feature_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feature_service.py @@ -644,7 +644,7 @@ class TestFeatureService: assert result.max_plugin_package_size == 15728640 # Verify default license status - assert result.license.status.value == "none" + assert result.license.status == "none" assert result.license.expired_at == "" assert result.license.workspaces.enabled is False diff --git a/api/tests/test_containers_integration_tests/services/test_feedback_service.py b/api/tests/test_containers_integration_tests/services/test_feedback_service.py index 3dcd6586e2..a4663450d4 100644 --- a/api/tests/test_containers_integration_tests/services/test_feedback_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feedback_service.py @@ -23,7 +23,7 @@ class TestFeedbackService: """Test FeedbackService methods.""" @pytest.fixture - def mock_db_session(self, monkeypatch): + def mock_db_session(self, monkeypatch: pytest.MonkeyPatch): """Mock database session.""" mock_session = mock.Mock() monkeypatch.setattr(db, "session", mock_session) diff --git a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py index ce63e7a71a..bfc2af6509 100644 --- a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py +++ b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py @@ -122,7 +122,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestUnsupportedError): handler.send_test(context=MagicMock(), method=MagicMock()) - def test_send_test_feature_disabled(self, monkeypatch): + def test_send_test_feature_disabled(self, monkeypatch: 
pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -137,7 +137,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Email delivery is not available"): handler.send_test(context=context, method=method) - def test_send_test_mail_not_inited(self, monkeypatch): + def test_send_test_mail_not_inited(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -154,7 +154,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Mail client is not initialized."): handler.send_test(context=context, method=method) - def test_send_test_no_recipients(self, monkeypatch): + def test_send_test_no_recipients(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -173,7 +173,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="No recipients configured"): handler.send_test(context=context, method=method) - def test_send_test_success(self, monkeypatch): + def test_send_test_success(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -209,7 +209,7 @@ class TestEmailDeliveryTestHandler: assert kwargs["to"] == "test@example.com" assert "RENDERED_Subj" in kwargs["subject"] - def test_send_test_sanitizes_subject(self, monkeypatch): + def test_send_test_sanitizes_subject(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", diff --git a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py index 44e5a82868..52ebc0131f 100644 --- a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py +++ 
b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py @@ -1,6 +1,7 @@ from __future__ import annotations import pytest +from sqlalchemy.orm import Session from services.message_service import MessageService from tests.test_containers_integration_tests.helpers.execution_extra_content import ( @@ -9,7 +10,7 @@ from tests.test_containers_integration_tests.helpers.execution_extra_content imp @pytest.mark.usefixtures("flask_req_ctx_with_containers") -def test_pagination_returns_extra_contents(db_session_with_containers): +def test_pagination_returns_extra_contents(db_session_with_containers: Session): fixture = create_human_input_message_fixture(db_session_with_containers) pagination = MessageService.pagination_by_first_id( diff --git a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py index 80289c448a..a8d295e6a9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py @@ -16,7 +16,7 @@ from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from extensions.ext_redis import redis_client -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document, DocumentSegment from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus, SegmentStatus from tasks.create_segment_to_index_task import create_segment_to_index_task @@ -73,7 +73,7 @@ class TestCreateSegmentToIndexTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) 
db_session_with_containers.add(account) @@ -82,7 +82,7 @@ class TestCreateSegmentToIndexTask: # Create tenant tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, plan="basic", ) db_session_with_containers.add(tenant) diff --git a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py index a5a3cd10b5..5287cd06db 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py @@ -12,7 +12,7 @@ from sqlalchemy.orm import Session from core.indexing_runner import DocumentIsPausedError from core.rag.index_processor.constant.index_type import IndexTechniqueType from enums.cloud_plan import CloudPlan -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus from tasks.document_indexing_task import ( @@ -54,7 +54,7 @@ class _TrackedSessionContext: @pytest.fixture(autouse=True) -def _ensure_testcontainers_db(db_session_with_containers): +def _ensure_testcontainers_db(db_session_with_containers: Session): """Ensure this suite always runs on testcontainers infrastructure.""" return db_session_with_containers @@ -121,12 +121,12 @@ class TestDatasetIndexingTaskIntegration: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.flush() - tenant = Tenant(name=fake.company(), status="normal") + tenant = Tenant(name=fake.company(), status=TenantStatus.NORMAL) db_session_with_containers.add(tenant) db_session_with_containers.flush() diff --git 
a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py index ff72232d12..c4895839c9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py @@ -5,6 +5,7 @@ from faker import Faker from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import TenantStatus from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from tasks.mail_account_deletion_task import send_account_deletion_verification_code, send_deletion_success_task @@ -55,7 +56,7 @@ class TestMailAccountDeletionTask: # Create tenant tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py index 8e9da6aaaa..0eec166fe2 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py @@ -18,6 +18,7 @@ from sqlalchemy import delete from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import AccountStatus, TenantStatus from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from tasks.mail_email_code_login import send_email_code_login_mail_task @@ -91,7 +92,7 @@ class TestSendEmailCodeLoginMailTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) @@ -120,7 +121,7 @@ class TestSendEmailCodeLoginMailTask: tenant = Tenant( name=fake.company(), 
plan="basic", - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py index f505361727..a452bee9f8 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py @@ -31,7 +31,7 @@ from tasks.mail_human_input_delivery_task import dispatch_human_input_email_task @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(HumanInputFormRecipient)) db_session_with_containers.execute(delete(HumanInputDelivery)) db_session_with_containers.execute(delete(HumanInputForm)) @@ -43,7 +43,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_workspace_member(db_session_with_containers): +def _create_workspace_member(db_session_with_containers: Session): account = Account( email="owner@example.com", name="Owner", diff --git a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py index 03c02ea341..204f533978 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py @@ -21,7 +21,7 @@ from tasks.remove_app_and_related_data_task import ( @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(WorkflowDraftVariable)) 
db_session_with_containers.execute(delete(WorkflowDraftVariableFile)) db_session_with_containers.execute(delete(UploadFile)) @@ -30,7 +30,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_tenant_and_app(db_session_with_containers): +def _create_tenant_and_app(db_session_with_containers: Session): tenant = Tenant(name=f"test_tenant_{uuid.uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() diff --git a/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py new file mode 100644 index 0000000000..62d3d79cf1 --- /dev/null +++ b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py @@ -0,0 +1,103 @@ +"""Unit tests for the Markdown API docs generator.""" + +import importlib.util +import sys +from pathlib import Path + + +def _load_generate_swagger_markdown_docs_module(): + api_dir = Path(__file__).resolve().parents[3] + script_path = api_dir / "dev" / "generate_swagger_markdown_docs.py" + + spec = importlib.util.spec_from_file_location("generate_swagger_markdown_docs", script_path) + assert spec + assert spec.loader + + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) # type: ignore[attr-defined] + return module + + +def test_generate_markdown_docs_keeps_split_docs_and_merges_fastopenapi_into_console(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / "openapi" + markdown_dir = tmp_path / "markdown" + stale_combined_doc = markdown_dir / "api-reference.md" + markdown_dir.mkdir() + stale_combined_doc.write_text("stale", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + path.write_text("{}", encoding="utf-8") + 
paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + path = output_dir / module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n\n## Routes\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + written_paths = module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert [path.name for path in written_paths] == [ + "console-swagger.md", + "web-swagger.md", + "service-swagger.md", + ] + assert not stale_combined_doc.exists() + assert not list(swagger_dir.glob("*.json")) + + console_markdown = (markdown_dir / "console-swagger.md").read_text(encoding="utf-8") + assert "## FastOpenAPI Preview (OpenAPI 3.0)" in console_markdown + assert "### fastopenapi-console-openapi" in console_markdown + assert "#### Routes" in console_markdown + assert "FastOpenAPI Preview" not in (markdown_dir / "web-swagger.md").read_text(encoding="utf-8") + assert "FastOpenAPI Preview" not in (markdown_dir / "service-swagger.md").read_text(encoding="utf-8") + + +def test_generate_markdown_docs_only_removes_generated_specs_from_separate_swagger_dir(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / "swagger" + markdown_dir = tmp_path / "markdown" + swagger_dir.mkdir() + existing_file = swagger_dir / "existing.txt" + existing_file.write_text("keep me", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + 
path.write_text("{}", encoding="utf-8") + paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + path = output_dir / module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert existing_file.read_text(encoding="utf-8") == "keep me" + assert not list(swagger_dir.glob("*.json")) diff --git a/api/tests/unit_tests/commands/test_generate_swagger_specs.py b/api/tests/unit_tests/commands/test_generate_swagger_specs.py index e77e875081..79a577087d 100644 --- a/api/tests/unit_tests/commands/test_generate_swagger_specs.py +++ b/api/tests/unit_tests/commands/test_generate_swagger_specs.py @@ -6,6 +6,16 @@ import sys from pathlib import Path +def _walk_values(value): + yield value + if isinstance(value, dict): + for child in value.values(): + yield from _walk_values(child) + elif isinstance(value, list): + for child in value: + yield from _walk_values(child) + + def _load_generate_swagger_specs_module(): api_dir = Path(__file__).resolve().parents[3] script_path = api_dir / "dev" / "generate_swagger_specs.py" @@ -35,3 +45,32 @@ def test_generate_specs_writes_console_web_and_service_swagger_files(tmp_path): payload = json.loads(path.read_text(encoding="utf-8")) assert payload["swagger"] == "2.0" assert "paths" in payload + + +def test_generate_specs_writes_swagger_with_resolvable_references_and_no_nulls(tmp_path): + module = _load_generate_swagger_specs_module() + + written_paths = 
module.generate_specs(tmp_path) + + for path in written_paths: + payload = json.loads(path.read_text(encoding="utf-8")) + definitions = payload["definitions"] + refs = { + item["$ref"].removeprefix("#/definitions/") + for item in _walk_values(payload) + if isinstance(item, dict) and isinstance(item.get("$ref"), str) + } + + assert refs <= set(definitions) + assert all(value is not None for value in _walk_values(payload)) + + +def test_generate_specs_is_idempotent(tmp_path): + module = _load_generate_swagger_specs_module() + + first_paths = module.generate_specs(tmp_path / "first") + second_paths = module.generate_specs(tmp_path / "second") + + assert [path.name for path in first_paths] == [path.name for path in second_paths] + for first_path, second_path in zip(first_paths, second_paths): + assert first_path.read_text(encoding="utf-8") == second_path.read_text(encoding="utf-8") diff --git a/api/tests/unit_tests/configs/test_dify_config.py b/api/tests/unit_tests/configs/test_dify_config.py index bad246a4bb..57dbf453de 100644 --- a/api/tests/unit_tests/configs/test_dify_config.py +++ b/api/tests/unit_tests/configs/test_dify_config.py @@ -114,8 +114,8 @@ def test_flask_configs(monkeypatch: pytest.MonkeyPatch): "pool_recycle": 3600, "pool_size": 30, "pool_use_lifo": False, - "pool_reset_on_return": None, "pool_timeout": 30, + "pool_reset_on_return": "rollback", } assert config["CONSOLE_WEB_URL"] == "https://example.com" diff --git a/api/tests/unit_tests/controllers/common/test_helpers.py b/api/tests/unit_tests/controllers/common/test_helpers.py index 59c463177c..376a7a90c5 100644 --- a/api/tests/unit_tests/controllers/common/test_helpers.py +++ b/api/tests/unit_tests/controllers/common/test_helpers.py @@ -57,7 +57,7 @@ class TestGuessFileInfoFromResponse: (False, "bin"), ], ) - def test_generated_filename_when_missing(self, monkeypatch, magic_available, expected_ext): + def test_generated_filename_when_missing(self, monkeypatch: pytest.MonkeyPatch, magic_available, 
expected_ext): if magic_available: if helpers.magic is None: pytest.skip("python-magic is not installed, cannot run 'magic_available=True' test variant") @@ -155,7 +155,7 @@ class TestMagicImportWarnings: ) def test_magic_import_warning_per_platform( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, platform_name, expected_message, ): diff --git a/api/tests/unit_tests/controllers/common/test_schema.py b/api/tests/unit_tests/controllers/common/test_schema.py index 56c8160f02..575f8c839c 100644 --- a/api/tests/unit_tests/controllers/common/test_schema.py +++ b/api/tests/unit_tests/controllers/common/test_schema.py @@ -1,10 +1,11 @@ import sys from enum import StrEnum +from typing import Literal from unittest.mock import MagicMock, patch import pytest from flask_restx import Namespace -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict, Field class UserModel(BaseModel): @@ -17,6 +18,35 @@ class ProductModel(BaseModel): price: float +class ChildModel(BaseModel): + value: str + + +class ParentModel(BaseModel): + child: ChildModel + + +class StatusEnum(StrEnum): + ACTIVE = "active" + INACTIVE = "inactive" + + +class PriorityEnum(StrEnum): + HIGH = "high" + LOW = "low" + + +class QueryModel(BaseModel): + model_config = ConfigDict(populate_by_name=True) + + page: int = Field(default=1, ge=1, le=100, description="Page number") + keyword: str | None = Field(default=None, min_length=1, max_length=50, description="Search keyword") + status: Literal["active", "inactive"] | None = Field(default=None, description="Status filter") + app_id: str = Field(..., alias="appId", description="Application ID") + tag_ids: list[str] = Field(default_factory=list, min_length=1, max_length=3, description="Tag IDs") + ambiguous: int | str | None = Field(default=None, description="Ambiguous query parameter") + + @pytest.fixture(autouse=True) def mock_console_ns(): """Mock the console_ns to avoid circular imports during test collection.""" @@ -64,6 +94,22 @@ def 
test_register_schema_model_passes_schema_from_pydantic(): assert schema == expected_schema +def test_register_schema_model_promotes_nested_pydantic_definitions(): + from controllers.common.schema import DEFAULT_REF_TEMPLATE_SWAGGER_2_0, register_schema_model + + namespace = MagicMock(spec=Namespace) + + register_schema_model(namespace, ParentModel) + + called_schemas = {call.args[0]: call.args[1] for call in namespace.schema_model.call_args_list} + parent_schema = ParentModel.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + + assert set(called_schemas) == {"ParentModel", "ChildModel"} + assert "$defs" not in called_schemas["ParentModel"] + assert called_schemas["ParentModel"]["properties"]["child"]["$ref"] == "#/definitions/ChildModel" + assert called_schemas["ChildModel"] == parent_schema["$defs"]["ChildModel"] + + def test_register_schema_models_registers_multiple_models(): from controllers.common.schema import register_schema_models @@ -77,7 +123,7 @@ def test_register_schema_models_registers_multiple_models(): assert called_names == ["UserModel", "ProductModel"] -def test_register_schema_models_calls_register_schema_model(monkeypatch): +def test_register_schema_models_calls_register_schema_model(monkeypatch: pytest.MonkeyPatch): from controllers.common.schema import register_schema_models namespace = MagicMock(spec=Namespace) @@ -100,16 +146,6 @@ def test_register_schema_models_calls_register_schema_model(monkeypatch): ] -class StatusEnum(StrEnum): - ACTIVE = "active" - INACTIVE = "inactive" - - -class PriorityEnum(StrEnum): - HIGH = "high" - LOW = "low" - - def test_get_or_create_model_returns_existing_model(mock_console_ns): from controllers.common.schema import get_or_create_model @@ -187,3 +223,54 @@ def test_register_enum_models_uses_correct_ref_template(): # Verify the schema contains enum values assert "enum" in schema or "anyOf" in schema + + +def test_query_params_from_model_builds_flask_restx_doc_params(): + from 
controllers.common.schema import query_params_from_model + + params = query_params_from_model(QueryModel) + + assert params["page"] == { + "in": "query", + "required": False, + "description": "Page number", + "type": "integer", + "default": 1, + "minimum": 1, + "maximum": 100, + } + assert params["keyword"] == { + "in": "query", + "required": False, + "description": "Search keyword", + "type": "string", + "minLength": 1, + "maxLength": 50, + } + assert params["status"] == { + "in": "query", + "required": False, + "description": "Status filter", + "type": "string", + "enum": ["active", "inactive"], + } + assert params["appId"] == { + "in": "query", + "required": True, + "description": "Application ID", + "type": "string", + } + assert params["tag_ids"] == { + "in": "query", + "required": False, + "description": "Tag IDs", + "type": "array", + "items": {"type": "string"}, + "minItems": 1, + "maxItems": 3, + } + assert params["ambiguous"] == { + "in": "query", + "required": False, + "description": "Ambiguous query parameter", + } diff --git a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py index 412edb9dfe..66d257ee66 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py @@ -68,7 +68,7 @@ def _segment(): ) -def test_get_segment_with_summary(monkeypatch): +def test_get_segment_with_summary(monkeypatch: pytest.MonkeyPatch): segment = _segment() summary = SimpleNamespace(summary_content="summary") diff --git a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py index 09ed2aaf69..4fa5d21493 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py @@ -3,6 +3,7 @@ 
from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -35,7 +36,7 @@ def dataset(): @pytest.fixture(autouse=True) -def bypass_decorators(mocker): +def bypass_decorators(mocker: MockerFixture): """Bypass all decorators on the API method.""" mocker.patch( "controllers.console.datasets.hit_testing.setup_required", @@ -56,7 +57,7 @@ def bypass_decorators(mocker): class TestHitTestingApi: - def test_hit_testing_success(self, app, dataset, dataset_id): + def test_hit_testing_success(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -99,7 +100,7 @@ class TestHitTestingApi: assert "records" in result assert result["records"] == [] - def test_hit_testing_success_with_optional_record_fields(self, app, dataset, dataset_id): + def test_hit_testing_success_with_optional_record_fields(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestHitTestingApi: assert result["query"] == payload["query"] assert result["records"] == records - def test_hit_testing_dataset_not_found(self, app, dataset_id): + def test_hit_testing_dataset_not_found(self, app: Flask, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -175,7 +176,7 @@ class TestHitTestingApi: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_hit_testing_invalid_args(self, app, dataset, dataset_id): + def test_hit_testing_invalid_args(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py index 0105aacd65..4042190ff6 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py +++ 
b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -60,7 +61,7 @@ def metadata_id(): @pytest.fixture(autouse=True) -def bypass_decorators(mocker): +def bypass_decorators(mocker: MockerFixture): """Bypass setup/login/license decorators.""" mocker.patch( "controllers.console.datasets.metadata.setup_required", diff --git a/api/tests/unit_tests/controllers/console/datasets/test_website.py b/api/tests/unit_tests/controllers/console/datasets/test_website.py index 9f0da6e76f..9991a0d345 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_website.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_website.py @@ -2,6 +2,7 @@ from unittest.mock import Mock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from controllers.console import console_ns from controllers.console.datasets.error import WebsiteCrawlError @@ -31,7 +32,7 @@ def app(): @pytest.fixture(autouse=True) -def bypass_auth_and_setup(mocker): +def bypass_auth_and_setup(mocker: MockerFixture): """Bypass setup/login/account decorators.""" mocker.patch( "controllers.console.datasets.website.login_required", @@ -48,7 +49,7 @@ def bypass_auth_and_setup(mocker): class TestWebsiteCrawlApi: - def test_crawl_success(self, app, mocker): + def test_crawl_success(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -85,7 +86,7 @@ class TestWebsiteCrawlApi: assert status == 200 assert result["job_id"] == "job-1" - def test_crawl_invalid_payload(self, app, mocker): + def test_crawl_invalid_payload(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -113,7 +114,7 @@ class TestWebsiteCrawlApi: with pytest.raises(WebsiteCrawlError, 
match="invalid payload"): method(api) - def test_crawl_service_error(self, app, mocker): + def test_crawl_service_error(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestWebsiteCrawlApi: class TestWebsiteCrawlStatusApi: - def test_get_status_success(self, app, mocker): + def test_get_status_success(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -181,7 +182,7 @@ class TestWebsiteCrawlStatusApi: assert status == 200 assert result["status"] == "completed" - def test_get_status_invalid_provider(self, app, mocker): + def test_get_status_invalid_provider(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -203,7 +204,7 @@ class TestWebsiteCrawlStatusApi: with pytest.raises(WebsiteCrawlError, match="invalid provider"): method(api, job_id) - def test_get_status_service_error(self, app, mocker): + def test_get_status_service_error(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py index e358435de4..2cfa938af8 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py @@ -1,6 +1,7 @@ from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from controllers.console.datasets.error import PipelineNotFoundError from controllers.console.datasets.wraps import get_rag_pipeline @@ -16,7 +17,7 @@ class TestGetRagPipeline: with pytest.raises(ValueError, match="missing pipeline_id"): dummy_view() - def test_pipeline_not_found(self, mocker): + def test_pipeline_not_found(self, mocker: MockerFixture): @get_rag_pipeline def dummy_view(**kwargs): return "ok" @@ -34,7 +35,7 @@ class TestGetRagPipeline: with pytest.raises(PipelineNotFoundError): 
dummy_view(pipeline_id="pipeline-1") - def test_pipeline_found_and_injected(self, mocker): + def test_pipeline_found_and_injected(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) pipeline.id = "pipeline-1" pipeline.tenant_id = "tenant-1" @@ -57,7 +58,7 @@ class TestGetRagPipeline: assert result is pipeline - def test_pipeline_id_removed_from_kwargs(self, mocker): + def test_pipeline_id_removed_from_kwargs(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline @@ -79,7 +80,7 @@ class TestGetRagPipeline: assert result == "ok" - def test_pipeline_id_cast_to_string(self, mocker): + def test_pipeline_id_cast_to_string(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline diff --git a/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py b/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py index 557fded37e..89cbea5ddc 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py +++ b/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py @@ -126,7 +126,7 @@ class TestRecommendedAppResponseModels: }, "app_id": "app-1", "description": "desc", - "category": "cat", + "categories": ["cat", "other"], "position": 1, "is_listed": True, "can_trial": False, @@ -137,4 +137,5 @@ class TestRecommendedAppResponseModels: ).model_dump(mode="json") assert response["recommended_apps"][0]["app_id"] == "app-1" + assert response["recommended_apps"][0]["categories"] == ["cat", "other"] assert response["categories"] == ["cat"] diff --git a/api/tests/unit_tests/controllers/console/test_admin.py b/api/tests/unit_tests/controllers/console/test_admin.py index 16197fcd0c..27f332ac51 100644 --- a/api/tests/unit_tests/controllers/console/test_admin.py +++ b/api/tests/unit_tests/controllers/console/test_admin.py @@ -4,6 +4,7 @@ import uuid from unittest.mock import Mock, PropertyMock, patch import pytest +from pytest_mock import MockerFixture from 
werkzeug.exceptions import NotFound, Unauthorized from controllers.console.admin import ( @@ -18,7 +19,7 @@ from models.model import App, InstalledApp, RecommendedApp @pytest.fixture(autouse=True) -def bypass_only_edition_cloud(mocker): +def bypass_only_edition_cloud(mocker: MockerFixture): """ Bypass only_edition_cloud decorator by setting EDITION to "CLOUD". """ @@ -29,7 +30,7 @@ def bypass_only_edition_cloud(mocker): @pytest.fixture -def mock_admin_auth(mocker): +def mock_admin_auth(mocker: MockerFixture): """ Provide valid admin authentication for controller tests. """ @@ -44,7 +45,7 @@ def mock_admin_auth(mocker): @pytest.fixture -def mock_console_payload(mocker): +def mock_console_payload(mocker: MockerFixture): payload = { "app_id": str(uuid.uuid4()), "language": "en-US", @@ -62,7 +63,7 @@ def mock_console_payload(mocker): @pytest.fixture -def mock_banner_payload(mocker): +def mock_banner_payload(mocker: MockerFixture): mocker.patch( "flask_restx.namespace.Namespace.payload", new_callable=PropertyMock, @@ -78,7 +79,7 @@ def mock_banner_payload(mocker): @pytest.fixture -def mock_session_factory(mocker): +def mock_session_factory(mocker: MockerFixture): mock_session = Mock() mock_session.execute = Mock() mock_session.add = Mock() @@ -97,7 +98,7 @@ class TestDeleteExploreBannerApi: def setup_method(self): self.api = DeleteExploreBannerApi() - def test_delete_banner_not_found(self, mocker, mock_admin_auth): + def test_delete_banner_not_found(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -106,7 +107,7 @@ class TestDeleteExploreBannerApi: with pytest.raises(NotFound, match="is not found"): self.api.delete(uuid.uuid4()) - def test_delete_banner_success(self, mocker, mock_admin_auth): + def test_delete_banner_success(self, mocker: MockerFixture, mock_admin_auth): mock_banner = Mock() mocker.patch( @@ -126,7 +127,7 @@ class TestInsertExploreBannerApi: 
def setup_method(self): self.api = InsertExploreBannerApi() - def test_insert_banner_success(self, mocker, mock_admin_auth, mock_banner_payload): + def test_insert_banner_success(self, mocker: MockerFixture, mock_admin_auth, mock_banner_payload): mocker.patch("controllers.console.admin.db.session.add") mocker.patch("controllers.console.admin.db.session.commit") @@ -168,7 +169,7 @@ class TestInsertExploreAppApiDelete: def setup_method(self): self.api = InsertExploreAppApi() - def test_delete_when_not_in_explore(self, mocker, mock_admin_auth): + def test_delete_when_not_in_explore(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.session_factory.create_session", return_value=Mock( @@ -183,7 +184,7 @@ class TestInsertExploreAppApiDelete: assert status == 204 assert response["result"] == "success" - def test_delete_when_in_explore_with_trial_app(self, mocker, mock_admin_auth): + def test_delete_when_in_explore_with_trial_app(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app from explore that has a trial app.""" app_id = uuid.uuid4() @@ -225,7 +226,7 @@ class TestInsertExploreAppApiDelete: assert response["result"] == "success" assert mock_app.is_public is False - def test_delete_with_installed_apps(self, mocker, mock_admin_auth): + def test_delete_with_installed_apps(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app that has installed apps in other tenants.""" app_id = uuid.uuid4() @@ -270,7 +271,7 @@ class TestInsertExploreAppListApi: def setup_method(self): self.api = InsertExploreAppListApi() - def test_app_not_found(self, mocker, mock_admin_auth, mock_console_payload): + def test_app_not_found(self, mocker: MockerFixture, mock_admin_auth, mock_console_payload): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -281,7 +282,7 @@ class TestInsertExploreAppListApi: def test_create_recommended_app( self, - mocker, + mocker: 
MockerFixture, mock_admin_auth, mock_console_payload, ): @@ -318,7 +319,9 @@ class TestInsertExploreAppListApi: assert response["result"] == "success" assert mock_app.is_public is True - def test_update_recommended_app(self, mocker, mock_admin_auth, mock_console_payload, mock_session_factory): + def test_update_recommended_app( + self, mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory + ): mock_app = Mock(spec=App) mock_app.id = "app-id" mock_app.site = None @@ -344,7 +347,7 @@ class TestInsertExploreAppListApi: def test_site_data_overrides_payload( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -381,7 +384,7 @@ class TestInsertExploreAppListApi: def test_create_trial_app_when_can_trial_enabled( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -413,7 +416,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_with_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -450,7 +453,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_without_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, diff --git a/api/tests/unit_tests/controllers/console/test_feature.py b/api/tests/unit_tests/controllers/console/test_feature.py index d8debc1f2c..1711aede61 100644 --- a/api/tests/unit_tests/controllers/console/test_feature.py +++ b/api/tests/unit_tests/controllers/console/test_feature.py @@ -1,3 +1,4 @@ +from pytest_mock import MockerFixture from werkzeug.exceptions import Unauthorized @@ -11,7 +12,7 @@ def unwrap(func): class TestFeatureApi: - def test_get_tenant_features_success(self, mocker): + def test_get_tenant_features_success(self, mocker: MockerFixture): from controllers.console.feature import FeatureApi mocker.patch( @@ -32,7 +33,7 @@ class TestFeatureApi: class 
TestSystemFeatureApi: - def test_get_system_features_authenticated(self, mocker): + def test_get_system_features_authenticated(self, mocker: MockerFixture): """ current_user.is_authenticated == True """ @@ -56,7 +57,7 @@ class TestSystemFeatureApi: assert result == {"features": {"sys_feature": True}} - def test_get_system_features_unauthenticated(self, mocker): + def test_get_system_features_unauthenticated(self, mocker: MockerFixture): """ current_user.is_authenticated raises Unauthorized """ diff --git a/api/tests/unit_tests/controllers/console/workspace/test_models.py b/api/tests/unit_tests/controllers/console/workspace/test_models.py index 4246e3c04c..3c4acbab44 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_models.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_models.py @@ -32,7 +32,7 @@ class TestDefaultModelApi: with ( app.test_request_context( "/", - query_string={"model_type": ModelType.LLM.value}, + query_string={"model_type": ModelType.LLM}, ), patch( "controllers.console.workspace.models.current_account_with_tenant", @@ -53,7 +53,7 @@ class TestDefaultModelApi: payload = { "model_settings": [ { - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "provider": "openai", "model": "gpt-4", } @@ -77,7 +77,7 @@ class TestDefaultModelApi: method = unwrap(api.get) with ( - app.test_request_context("/", query_string={"model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), patch("controllers.console.workspace.models.ModelProviderService") as service, ): @@ -113,7 +113,7 @@ class TestModelProviderModelApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "load_balancing": { "configs": [{"weight": 1}], "enabled": True, @@ -139,7 +139,7 @@ class TestModelProviderModelApi: payload = { "model": 
"gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -180,7 +180,7 @@ class TestModelProviderModelCredentialApi: "/", query_string={ "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, }, ), patch( @@ -208,7 +208,7 @@ class TestModelProviderModelCredentialApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -229,7 +229,7 @@ class TestModelProviderModelCredentialApi: method = unwrap(api.get) with ( - app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), patch("controllers.console.workspace.models.ModelProviderService") as service, patch("controllers.console.workspace.models.ModelLoadBalancingService") as lb, @@ -248,7 +248,7 @@ class TestModelProviderModelCredentialApi: payload = { "model": "gpt", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credential_id": "123e4567-e89b-12d3-a456-426614174000", } @@ -269,7 +269,7 @@ class TestModelProviderModelCredentialSwitchApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credential_id": "abc", } @@ -293,7 +293,7 @@ class TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -314,7 +314,7 @@ class TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -337,7 +337,7 @@ class TestModelProviderModelValidateApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -360,7 +360,7 @@ class 
TestModelProviderModelValidateApi: payload = { "model": model_name, - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {}, } @@ -412,7 +412,7 @@ class TestParameterAndAvailableModels: ): service_mock.return_value.get_models_by_model_type.return_value = [] - result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert "data" in result @@ -442,6 +442,6 @@ class TestParameterAndAvailableModels: ): service.return_value.get_models_by_model_type.return_value = [] - result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert result["data"] == [] diff --git a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py index d1b09c3a58..598677faff 100644 --- a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py +++ b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py @@ -189,7 +189,7 @@ class TestGetUserTenant: """Test get_user_tenant decorator""" @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch): + def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch): """Test that decorator injects tenant_model and user_model into kwargs""" # Arrange @@ -244,7 +244,9 @@ class TestGetUserTenant: protected_view() @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_use_default_session_id_when_user_id_empty(self, mock_tenant_class, app: Flask, monkeypatch): + def test_should_use_default_session_id_when_user_id_empty( + self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch + ): """Test that default session ID is used when user_id is empty string""" # Arrange diff --git a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py 
b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py index 6dc8f54d42..74c13d50f6 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py @@ -340,7 +340,7 @@ class TestConversationAppModeValidation: @pytest.mark.parametrize( "mode", [ - AppMode.CHAT.value, + AppMode.CHAT, AppMode.AGENT_CHAT.value, AppMode.ADVANCED_CHAT.value, ], @@ -365,7 +365,7 @@ class TestConversationAppModeValidation: app raises NotChatAppError. """ app = Mock(spec=App) - app.mode = AppMode.COMPLETION.value + app.mode = AppMode.COMPLETION app_mode = AppMode.value_of(app.mode) assert app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT} @@ -498,7 +498,7 @@ class TestConversationApiController: def test_list_not_chat(self, app) -> None: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations", method="GET"): @@ -531,7 +531,7 @@ class TestConversationApiController: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -546,7 +546,7 @@ class TestConversationDetailApiController: def test_delete_not_chat(self, app) -> None: api = ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations/1", method="DELETE"): @@ -562,7 +562,7 @@ class TestConversationDetailApiController: api = ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) 
end_user = SimpleNamespace() with app.test_request_context("/conversations/1", method="DELETE"): @@ -580,7 +580,7 @@ class TestConversationRenameApiController: api = ConversationRenameApi() handler = _unwrap(api.post) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -596,7 +596,7 @@ class TestConversationVariablesApiController: def test_not_chat(self, app) -> None: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations/1/variables", method="GET"): @@ -612,7 +612,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -645,7 +645,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -671,7 +671,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -697,7 +697,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -731,7 +731,7 @@ class 
TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( diff --git a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py index 3cc444e467..9c310a4f45 100644 --- a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py +++ b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py @@ -3,6 +3,7 @@ from unittest.mock import Mock from uuid import UUID, uuid4 import pytest +from pytest_mock import MockerFixture from controllers.service_api.end_user.end_user import EndUserApi from controllers.service_api.end_user.error import EndUserNotFoundError @@ -21,7 +22,9 @@ class TestEndUserApi: app.tenant_id = str(uuid4()) return app - def test_get_end_user_returns_all_attributes(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_returns_all_attributes( + self, mocker: MockerFixture, resource: EndUserApi, app_model: App + ) -> None: end_user = Mock(spec=EndUser) end_user.id = str(uuid4()) end_user.tenant_id = app_model.tenant_id @@ -54,7 +57,7 @@ class TestEndUserApi: assert result["created_at"].startswith("2024-01-01T00:00:00") assert result["updated_at"].startswith("2024-01-02T00:00:00") - def test_get_end_user_not_found(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_not_found(self, mocker: MockerFixture, resource: EndUserApi, app_model: App) -> None: mocker.patch("controllers.service_api.end_user.end_user.EndUserService.get_end_user_by_id", return_value=None) with pytest.raises(EndUserNotFoundError): diff --git a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py index 
9073ae1044..c1a4da8cd3 100644 --- a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py +++ b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py @@ -12,12 +12,13 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.output_parser.cot_output_parser import CotAgentOutputParser @pytest.fixture -def mock_action_class(mocker): +def mock_action_class(mocker: MockerFixture): mock_action = MagicMock() mocker.patch( "core.agent.output_parser.cot_output_parser.AgentScratchpadUnit.Action", diff --git a/api/tests/unit_tests/core/agent/strategy/test_plugin.py b/api/tests/unit_tests/core/agent/strategy/test_plugin.py index e0894f1e90..0fea04845d 100644 --- a/api/tests/unit_tests/core/agent/strategy/test_plugin.py +++ b/api/tests/unit_tests/core/agent/strategy/test_plugin.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.strategy.plugin import PluginAgentStrategy @@ -213,7 +214,9 @@ class TestInvoke: (None, None, "msg"), ], ) - def test_invoke_optional_arguments(self, strategy, mocker, conversation_id, app_id, message_id) -> None: + def test_invoke_optional_arguments( + self, strategy, mocker: MockerFixture, conversation_id, app_id, message_id + ) -> None: mock_manager = MagicMock() mock_manager.invoke = MagicMock(return_value=iter([])) diff --git a/api/tests/unit_tests/core/agent/test_base_agent_runner.py b/api/tests/unit_tests/core/agent/test_base_agent_runner.py index db4b293b16..d5fb853ee3 100644 --- a/api/tests/unit_tests/core/agent/test_base_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_base_agent_runner.py @@ -3,6 +3,7 @@ from decimal import Decimal from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.agent.base_agent_runner as module from core.agent.base_agent_runner import BaseAgentRunner @@ -13,7 +14,7 @@ from 
core.agent.base_agent_runner import BaseAgentRunner @pytest.fixture -def mock_db_session(mocker): +def mock_db_session(mocker: MockerFixture): session = mocker.MagicMock() mocker.patch.object(module.db, "session", session) return session @@ -41,13 +42,13 @@ def runner(mocker, mock_db_session): class TestRepack: - def test_sets_empty_if_none(self, runner, mocker): + def test_sets_empty_if_none(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = None result = runner._repack_app_generate_entity(entity) assert result.app_config.prompt_template.simple_prompt_template == "" - def test_keeps_existing(self, runner, mocker): + def test_keeps_existing(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = "abc" result = runner._repack_app_generate_entity(entity) @@ -60,7 +61,7 @@ class TestRepack: class TestUpdatePromptTool: - def build_param(self, mocker, **kwargs): + def build_param(self, mocker: MockerFixture, **kwargs): p = mocker.MagicMock() p.form = kwargs.get("form") @@ -75,7 +76,7 @@ class TestUpdatePromptTool: p.required = kwargs.get("required", False) return p - def test_skip_non_llm(self, runner, mocker): + def test_skip_non_llm(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form="NOT_LLM") tool.get_runtime_parameters.return_value = [param] @@ -86,7 +87,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_enum_and_required(self, runner, mocker): + def test_enum_and_required(self, runner, mocker: MockerFixture): option = mocker.MagicMock(value="opt1") param = self.build_param( mocker, @@ -104,7 +105,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert "p1" in result.parameters["required"] - def test_skip_file_type_param(self, 
runner, mocker): + def test_skip_file_type_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form=module.ToolParameter.ToolParameterForm.LLM) param.type = module.ToolParameter.ToolParameterType.FILE @@ -116,7 +117,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_duplicate_required_not_duplicated(self, runner, mocker): + def test_duplicate_required_not_duplicated(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param( @@ -141,7 +142,7 @@ class TestUpdatePromptTool: class TestCreateAgentThought: - def test_with_files(self, runner, mock_db_session, mocker): + def test_with_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = mocker.MagicMock(id=10) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -149,7 +150,7 @@ class TestCreateAgentThought: assert result == "10" assert runner.agent_thought_count == 1 - def test_without_files(self, runner, mock_db_session, mocker): + def test_without_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = mocker.MagicMock(id=11) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -163,7 +164,7 @@ class TestCreateAgentThought: class TestSaveAgentThought: - def setup_agent(self, mocker): + def setup_agent(self, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;tool2" agent.tool_labels = {} @@ -175,7 +176,7 @@ class TestSaveAgentThought: with pytest.raises(ValueError): runner.save_agent_thought("id", None, None, None, None, None, None, [], None) - def test_full_update(self, runner, mock_db_session, mocker): + def test_full_update(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -210,7 +211,7 @@ class TestSaveAgentThought: assert agent.tokens == 
3 assert "tool1" in json.loads(agent.tool_labels_str) - def test_label_fallback_when_none(self, runner, mock_db_session, mocker): + def test_label_fallback_when_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) agent.tool = "unknown_tool" mock_db_session.scalar.return_value = agent @@ -220,7 +221,7 @@ class TestSaveAgentThought: labels = json.loads(agent.tool_labels_str) assert "unknown_tool" in labels - def test_json_failure_paths(self, runner, mock_db_session, mocker): + def test_json_failure_paths(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -241,13 +242,13 @@ class TestSaveAgentThought: assert mock_db_session.commit.called - def test_messages_ids_none(self, runner, mock_db_session, mocker): + def test_messages_ids_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent runner.save_agent_thought("id", None, None, None, None, None, None, None, None) assert mock_db_session.commit.called - def test_success_dict_serialization(self, runner, mock_db_session, mocker): + def test_success_dict_serialization(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -273,19 +274,19 @@ class TestSaveAgentThought: class TestOrganizeUserPrompt: - def test_no_files(self, runner, mock_db_session, mocker): + def test_no_files(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [] msg = mocker.MagicMock(id="1", query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_with_files_no_config(self, runner, mock_db_session, mocker): + def test_with_files_no_config(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value 
= [mocker.MagicMock()] msg = mocker.MagicMock(id="1", query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_image_detail_low_fallback(self, runner, mock_db_session, mocker): + def test_image_detail_low_fallback(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() file_config.image_config = mocker.MagicMock(detail=None) @@ -305,27 +306,27 @@ class TestOrganizeUserPrompt: class TestOrganizeHistory: - def test_empty(self, runner, mock_db_session, mocker): + def test_empty(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) result = runner.organize_agent_history([]) assert result == [] - def test_with_answer_only(self, runner, mock_db_session, mocker): + def test_with_answer_only(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="m1", answer="ans", agent_thoughts=[], app_model_config=None) mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert any(isinstance(x, module.AssistantPromptMessage) for x in result) - def test_skip_current_message(self, runner, mock_db_session, mocker): + def test_skip_current_message(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="msg_current", agent_thoughts=[], answer="ans", app_model_config=None) mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert result == [] - def test_with_tool_calls_invalid_json(self, runner, mock_db_session, 
mocker): + def test_with_tool_calls_invalid_json(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input="invalid", @@ -341,7 +342,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_empty_tool_name_split(self, runner, mock_db_session, mocker): + def test_empty_tool_name_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=";", thought="thinking") msg = mocker.MagicMock(id="m5", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -350,7 +351,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_valid_json_tool_flow(self, runner, mock_db_session, mocker): + def test_valid_json_tool_flow(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=json.dumps({"tool1": {"x": 1}}), @@ -379,7 +380,7 @@ class TestOrganizeHistory: class TestConvertToolToPromptMessageTool: - def test_basic_conversion(self, runner, mocker): + def test_basic_conversion(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") runtime_param = mocker.MagicMock() @@ -404,7 +405,7 @@ class TestConvertToolToPromptMessageTool: prompt_tool, entity = runner._convert_tool_to_prompt_message_tool(tool) assert entity == tool_entity - def test_full_conversion_multiple_params(self, runner, mocker): + def test_full_conversion_multiple_params(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") # LLM param with input_schema override @@ -441,7 +442,7 @@ class TestConvertToolToPromptMessageTool: class TestInitPromptToolsExtended: - def test_agent_tool_branch(self, runner, mocker): + def test_agent_tool_branch(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="agent_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) 
mocker.patch.object(runner, "_convert_tool_to_prompt_message_tool", return_value=(MagicMock(), "entity")) @@ -449,7 +450,7 @@ class TestInitPromptToolsExtended: tools, prompts = runner._init_prompt_tools() assert "agent_tool" in tools - def test_exception_in_conversion(self, runner, mocker): + def test_exception_in_conversion(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="bad_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) mocker.patch.object(runner, "_convert_tool_to_prompt_message_tool", side_effect=Exception) @@ -464,7 +465,7 @@ class TestInitPromptToolsExtended: class TestAdditionalCoverage: - def test_update_prompt_with_input_schema(self, runner, mocker): + def test_update_prompt_with_input_schema(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = mocker.MagicMock() @@ -487,7 +488,7 @@ class TestAdditionalCoverage: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"]["p1"]["type"] == "number" - def test_save_agent_thought_existing_labels(self, runner, mock_db_session, mocker): + def test_save_agent_thought_existing_labels(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {"tool1": {"en_US": "existing"}} @@ -498,7 +499,7 @@ class TestAdditionalCoverage: labels = json.loads(agent.tool_labels_str) assert labels["tool1"]["en_US"] == "existing" - def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker): + def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {} @@ -508,7 +509,7 @@ class TestAdditionalCoverage: runner.save_agent_thought("id", None, None, None, None, "meta_string", None, [], None) assert agent.tool_meta_str == "meta_string" - def test_convert_dataset_retriever_tool(self, runner, mocker): + def 
test_convert_dataset_retriever_tool(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -525,7 +526,7 @@ class TestAdditionalCoverage: prompt = runner._convert_dataset_retriever_tool_to_prompt_message_tool(ds_tool) assert prompt is not None - def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker): + def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() @@ -544,7 +545,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_user_prompt(msg) assert result is not None - def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker): + def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=None, thought="thinking") msg = mocker.MagicMock(id="m3", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -554,7 +555,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker): + def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1;tool2", tool_input=json.dumps({"tool1": {}, "tool2": {}}), @@ -572,7 +573,7 @@ class TestAdditionalCoverage: # ================= Additional Surgical Coverage ================= - def test_convert_tool_select_enum_branch(self, runner, mocker): + def test_convert_tool_select_enum_branch(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -599,7 +600,7 @@ class TestAdditionalCoverage: class TestConvertDatasetRetrieverTool: - def test_required_param_added(self, runner, mocker): 
+ def test_required_param_added(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -619,7 +620,7 @@ class TestConvertDatasetRetrieverTool: class TestBaseAgentRunnerInit: - def test_init_sets_stream_tool_call_and_files(self, mocker): + def test_init_sets_stream_tool_call_and_files(self, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = 2 mocker.patch.object(module.db, "session", session) @@ -662,7 +663,7 @@ class TestBaseAgentRunnerInit: class TestBaseAgentRunnerCoverage: - def test_convert_tool_skips_non_llm_param(self, runner, mocker): + def test_convert_tool_skips_non_llm_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -680,7 +681,7 @@ class TestBaseAgentRunnerCoverage: assert prompt_tool.parameters["properties"] == {} - def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker): + def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker: MockerFixture): dataset_tool = mocker.MagicMock() dataset_tool.entity.identity.name = "ds" runner.dataset_tools = [dataset_tool] @@ -692,7 +693,7 @@ class TestBaseAgentRunnerCoverage: assert tools["ds"] == dataset_tool assert len(prompt_tools) == 1 - def test_update_prompt_message_tool_select_enum(self, runner, mocker): + def test_update_prompt_message_tool_select_enum(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() option1 = mocker.MagicMock(value="A") @@ -716,7 +717,7 @@ class TestBaseAgentRunnerCoverage: assert result.parameters["properties"]["select_param"]["enum"] == ["A", "B"] - def test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker): + def test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {} @@ -754,7 +755,7 @@ class TestBaseAgentRunnerCoverage: 
assert isinstance(agent.observation, str) assert isinstance(agent.tool_meta_str, str) - def test_save_agent_thought_skips_empty_tool_name(self, runner, mock_db_session, mocker): + def test_save_agent_thought_skips_empty_tool_name(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;;" agent.tool_labels = {} @@ -768,7 +769,7 @@ class TestBaseAgentRunnerCoverage: labels = json.loads(agent.tool_labels_str) assert "" not in labels - def test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker): + def test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) @@ -778,7 +779,7 @@ class TestBaseAgentRunnerCoverage: assert system_message in result - def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker): + def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=None, diff --git a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py index cde8820e00..314305d371 100644 --- a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py @@ -2,6 +2,7 @@ import json from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.cot_agent_runner import CotAgentRunner from core.agent.entities import AgentScratchpadUnit @@ -25,7 +26,7 @@ class DummyRunner(CotAgentRunner): @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Prevent BaseAgentRunner __init__ from hitting database mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.organize_agent_history", @@ -165,7 +166,7 @@ 
class TestHandleInvokeAction: response, meta = runner._handle_invoke_action(action, {}, []) assert "there is not a tool named" in response - def test_tool_with_json_string_args(self, runner, mocker): + def test_tool_with_json_string_args(self, runner, mocker: MockerFixture): action = AgentScratchpadUnit.Action(action_name="tool", action_input=json.dumps({"a": 1})) tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -180,7 +181,7 @@ class TestHandleInvokeAction: class TestOrganizeHistoricPromptMessages: - def test_empty_history(self, runner, mocker): + def test_empty_history(self, runner, mocker: MockerFixture): mocker.patch( "core.agent.cot_agent_runner.AgentHistoryPromptTransform.get_prompt", return_value=[], @@ -190,7 +191,7 @@ class TestOrganizeHistoricPromptMessages: class TestRun: - def test_run_handles_empty_parser_output(self, runner, mocker): + def test_run_handles_empty_parser_output(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -202,7 +203,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert isinstance(results, list) - def test_run_with_action_and_tool_invocation(self, runner, mocker): + def test_run_with_action_and_tool_invocation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -223,7 +224,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_respects_max_iteration_boundary(self, runner, mocker): + def test_run_respects_max_iteration_boundary(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 1 message = MagicMock() message.id = "msg-id" @@ -245,7 +246,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_basic_flow(self, runner, mocker): + def test_run_basic_flow(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ 
-257,7 +258,7 @@ class TestRun: results = list(runner.run(message, "query", {"name": "John"})) assert results - def test_run_max_iteration_error(self, runner, mocker): + def test_run_max_iteration_error(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 0 message = MagicMock() message.id = "msg-id" @@ -272,7 +273,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {})) - def test_run_increase_usage_aggregation(self, runner, mocker): + def test_run_increase_usage_aggregation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" runner.app_config.agent.max_iteration = 2 @@ -329,7 +330,7 @@ class TestRun: assert final_usage.completion_price == 2 assert final_usage.total_price == 4 - def test_run_when_no_action_branch(self, runner, mocker): + def test_run_when_no_action_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -341,7 +342,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "" - def test_run_usage_missing_key_branch(self, runner, mocker): + def test_run_usage_missing_key_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -354,7 +355,7 @@ class TestRun: list(runner.run(message, "query", {})) - def test_run_prompt_tool_update_branch(self, runner, mocker): + def test_run_prompt_tool_update_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -410,7 +411,7 @@ class TestRun: class TestInitReactState: - def test_init_react_state_resets_state(self, runner, mocker): + def test_init_react_state_resets_state(self, runner, mocker: MockerFixture): mocker.patch.object(runner, "_organize_historic_prompt_messages", return_value=["historic"]) runner._agent_scratchpad = ["old"] runner._query = "old" @@ -423,7 +424,7 @@ class TestInitReactState: class TestHandleInvokeActionExtended: - def 
test_tool_with_invalid_json_string_args(self, runner, mocker): + def test_tool_with_invalid_json_string_args(self, runner, mocker: MockerFixture): action = AgentScratchpadUnit.Action(action_name="tool", action_input="not-json") tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -457,7 +458,7 @@ class TestFillInputsEdgeCases: class TestOrganizeHistoricPromptMessagesExtended: - def test_user_message_flushes_scratchpad(self, runner, mocker): + def test_user_message_flushes_scratchpad(self, runner, mocker: MockerFixture): from graphon.model_runtime.entities.message_entities import UserPromptMessage user_message = UserPromptMessage(content="Hi") @@ -480,7 +481,7 @@ class TestOrganizeHistoricPromptMessagesExtended: with pytest.raises(NotImplementedError): runner._organize_historic_prompt_messages([]) - def test_agent_history_transform_invocation(self, runner, mocker): + def test_agent_history_transform_invocation(self, runner, mocker: MockerFixture): mock_transform = MagicMock() mock_transform.get_prompt.return_value = [] @@ -495,7 +496,7 @@ class TestOrganizeHistoricPromptMessagesExtended: class TestRunAdditionalBranches: - def test_run_with_no_action_final_answer_empty(self, runner, mocker): + def test_run_with_no_action_final_answer_empty(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -507,7 +508,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert any(hasattr(r, "delta") for r in results) - def test_run_with_final_answer_action_string(self, runner, mocker): + def test_run_with_final_answer_action_string(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -521,7 +522,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "done" - def test_run_with_final_answer_action_dict(self, runner, mocker): + def test_run_with_final_answer_action_dict(self, runner, mocker: 
MockerFixture): message = MagicMock() message.id = "msg-id" @@ -535,7 +536,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert json.loads(results[-1].delta.message.content) == {"a": 1} - def test_run_with_string_final_answer(self, runner, mocker): + def test_run_with_string_final_answer(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" diff --git a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py index ea8cc8aa86..8e7093fd12 100644 --- a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from pytest_mock import MockerFixture from core.agent.cot_chat_agent_runner import CotChatAgentRunner from graphon.model_runtime.entities.message_entities import TextPromptMessageContent @@ -55,7 +56,7 @@ def runner(): class TestOrganizeSystemPrompt: - def test_organize_system_prompt_success(self, runner, mocker): + def test_organize_system_prompt_success(self, runner, mocker: MockerFixture): first_prompt = "Instruction: {{instruction}}, Tools: {{tools}}, Names: {{tool_names}}" runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt(first_prompt))) @@ -154,7 +155,7 @@ class TestOrganizeUserQuery: class TestOrganizePromptMessages: - def test_no_scratchpad(self, runner, mocker): + def test_no_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -164,7 +165,7 @@ class TestOrganizePromptMessages: assert "query" in result runner._organize_historic_prompt_messages.assert_called_once() - def test_with_final_scratchpad(self, runner, mocker): + def 
test_with_final_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -177,7 +178,7 @@ class TestOrganizePromptMessages: combined = "".join([m.content for m in assistant_msgs if isinstance(m.content, str)]) assert "Final Answer: done" in combined - def test_with_thought_action_observation(self, runner, mocker): + def test_with_thought_action_observation(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -197,7 +198,7 @@ class TestOrganizePromptMessages: assert "Action: action" in combined assert "Observation: observe" in combined - def test_multiple_units_mixed(self, runner, mocker): + def test_multiple_units_mixed(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) diff --git a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py index 2f5873d865..0d949c357d 100644 --- a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import MockerFixture from core.agent.cot_completion_agent_runner import CotCompletionAgentRunner from graphon.model_runtime.entities.message_entities import ( @@ -74,7 +75,7 @@ class TestOrganizeInstructionPrompt: class TestOrganizeHistoricPrompt: - def test_with_user_and_assistant_string(self, runner, 
mocker): + def test_with_user_and_assistant_string(self, runner, mocker: MockerFixture): user_msg = UserPromptMessage(content="Hello") assistant_msg = AssistantPromptMessage(content="Hi there") @@ -89,7 +90,7 @@ class TestOrganizeHistoricPrompt: assert "Question: Hello" in result assert "Hi there" in result - def test_assistant_list_with_text_content(self, runner, mocker): + def test_assistant_list_with_text_content(self, runner, mocker: MockerFixture): text_content = TextPromptMessageContent(data="Partial answer") assistant_msg = AssistantPromptMessage(content=[text_content]) @@ -103,7 +104,7 @@ class TestOrganizeHistoricPrompt: assert "Partial answer" in result - def test_assistant_list_with_non_text_content_ignored(self, runner, mocker): + def test_assistant_list_with_non_text_content_ignored(self, runner, mocker: MockerFixture): non_text_content = ImagePromptMessageContent(format="url", mime_type="image/png") assistant_msg = AssistantPromptMessage(content=[non_text_content]) @@ -116,7 +117,7 @@ class TestOrganizeHistoricPrompt: result = runner._organize_historic_prompt() assert result == "" - def test_empty_history(self, runner, mocker): + def test_empty_history(self, runner, mocker: MockerFixture): mocker.patch.object( runner, "_organize_historic_prompt_messages", @@ -136,7 +137,7 @@ class TestOrganizePromptMessages: def test_full_flow_with_scratchpad( self, runner, - mocker, + mocker: MockerFixture, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory, @@ -171,7 +172,12 @@ class TestOrganizePromptMessages: assert "Question: What is Python?" 
in content def test_no_scratchpad( - self, runner, mocker, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory + self, + runner, + mocker: MockerFixture, + dummy_app_config_factory, + dummy_agent_config_factory, + dummy_prompt_entity_factory, ): template = "SYS {{historic_messages}} {{agent_scratchpad}} {{query}}" @@ -198,7 +204,7 @@ class TestOrganizePromptMessages: def test_partial_scratchpad_units( self, runner, - mocker, + mocker: MockerFixture, thought, action, observation, diff --git a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py index 17ab5babcb..3a4347e723 100644 --- a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py @@ -3,6 +3,7 @@ from typing import Any from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.errors import AgentMaxIterationError from core.agent.fc_agent_runner import FunctionCallAgentRunner @@ -68,7 +69,7 @@ class DummyResult: @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Completely bypass BaseAgentRunner __init__ to avoid DB / Flask context mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.__init__", @@ -230,7 +231,7 @@ class TestOrganizeUserQuery: result = runner._organize_user_query(None, []) assert len(result) == 1 - def test_with_files_uses_image_detail_config(self, runner, mocker): + def test_with_files_uses_image_detail_config(self, runner, mocker: MockerFixture): file_content = TextPromptMessageContent(data="file-content") mock_to_prompt = mocker.patch( "core.agent.fc_agent_runner.file_manager.to_prompt_message_content", @@ -352,7 +353,7 @@ class TestRunMethod: assert len(outputs) == 1 assert runner.save_agent_thought.call_args.kwargs["thought"] == "hi" - def test_run_streaming_tool_call_inputs_type_error(self, runner, mocker): + def 
test_run_streaming_tool_call_inputs_type_error(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") runner.stream_tool_call = True @@ -398,7 +399,7 @@ class TestRunMethod: outputs = list(runner.run(message, "query")) assert len(outputs) >= 1 - def test_run_with_tool_instance_and_files(self, runner, mocker): + def test_run_with_tool_instance_and_files(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") tool_call = MagicMock() diff --git a/api/tests/unit_tests/core/agent/test_plugin_entities.py b/api/tests/unit_tests/core/agent/test_plugin_entities.py index 9955190aca..aa3098a2a1 100644 --- a/api/tests/unit_tests/core/agent/test_plugin_entities.py +++ b/api/tests/unit_tests/core/agent/test_plugin_entities.py @@ -9,6 +9,7 @@ mocking; ensure entity invariants and validation rules remain stable. import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.agent.plugin_entities import ( AgentFeature, @@ -28,12 +29,12 @@ from core.tools.entities.tool_entities import ToolIdentity, ToolProviderIdentity @pytest.fixture -def mock_identity(mocker): +def mock_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyIdentity) @pytest.fixture -def mock_provider_identity(mocker): +def mock_provider_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyProviderIdentity) @@ -47,7 +48,7 @@ class TestAgentStrategyParameterType: "enum_member", list(AgentStrategyParameter.AgentStrategyParameterType), ) - def test_as_normal_type_calls_external_function(self, mocker, enum_member) -> None: + def test_as_normal_type_calls_external_function(self, mocker: MockerFixture, enum_member) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.as_normal_type", return_value="normalized", @@ -58,7 +59,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member) assert result == "normalized" - def test_as_normal_type_propagates_exception(self, mocker) -> None: 
+ def test_as_normal_type_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.as_normal_type", @@ -79,7 +80,7 @@ class TestAgentStrategyParameterType: (AgentStrategyParameter.AgentStrategyParameterType.FILES, []), ], ) - def test_cast_value_calls_external_function(self, mocker, enum_member, value) -> None: + def test_cast_value_calls_external_function(self, mocker: MockerFixture, enum_member, value) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.cast_parameter_value", return_value="casted", @@ -90,7 +91,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member, value) assert result == "casted" - def test_cast_value_propagates_exception(self, mocker) -> None: + def test_cast_value_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.cast_parameter_value", @@ -136,7 +137,7 @@ class TestAgentStrategyParameter: assert any(error["loc"] == ("type",) for error in exc_info.value.errors()) - def test_init_frontend_parameter_calls_external(self, mocker) -> None: + def test_init_frontend_parameter_calls_external(self, mocker: MockerFixture) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", return_value="frontend", @@ -153,7 +154,7 @@ class TestAgentStrategyParameter: mock_func.assert_called_once_with(param, param.type, "value") assert result == "frontend" - def test_init_frontend_parameter_propagates_exception(self, mocker) -> None: + def test_init_frontend_parameter_propagates_exception(self, mocker: MockerFixture) -> None: mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", side_effect=RuntimeError("error"), diff --git a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py 
b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py index 1c5b6ed944..6dbf301f65 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py @@ -10,7 +10,7 @@ class TestGetParametersFromFeatureDict: """Test suite for get_parameters_from_feature_dict""" @pytest.fixture - def mock_config(self, monkeypatch): + def mock_config(self, monkeypatch: pytest.MonkeyPatch): """Mock dify_config values""" mock = MagicMock() mock.UPLOAD_IMAGE_FILE_SIZE_LIMIT = 1 @@ -23,7 +23,7 @@ class TestGetParametersFromFeatureDict: return mock @pytest.fixture - def mock_default_file_limits(self, monkeypatch): + def mock_default_file_limits(self, monkeypatch: pytest.MonkeyPatch): """Mock DEFAULT_FILE_NUMBER_LIMITS constant""" monkeypatch.setattr(parameters_mapping, "DEFAULT_FILE_NUMBER_LIMITS", 99) return 99 diff --git a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py index 013ed0cbc4..bd4ca5ff85 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.common.sensitive_word_avoidance.manager import ( SensitiveWordAvoidanceConfigManager, @@ -26,7 +27,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result is None - def test_convert_returns_entity_when_enabled(self, mocker): + def test_convert_returns_entity_when_enabled(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() mocker.patch( @@ -48,7 +49,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result == mock_entity - def 
test_convert_enabled_without_type_or_config(self, mocker): + def test_convert_enabled_without_type_or_config(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() patched = mocker.patch( @@ -135,7 +136,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: with pytest.raises(ValueError, match="must be a dict"): SensitiveWordAvoidanceConfigManager.validate_and_set_defaults(tenant_id="tenant1", config=config) - def test_validate_calls_moderation_factory(self, mocker): + def test_validate_calls_moderation_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -159,7 +160,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: assert result_config["sensitive_word_avoidance"]["enabled"] is True assert fields == ["sensitive_word_avoidance"] - def test_validate_sets_empty_dict_when_config_none(self, mocker): + def test_validate_sets_empty_dict_when_config_none(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -179,7 +180,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: # Assert mock_validate.assert_called_once_with(name="mock_type", tenant_id="tenant1", config={}) - def test_validate_only_structure_validate_skips_factory(self, mocker): + def test_validate_only_structure_validate_skips_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py index 992b580376..359b04070b 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py +++ 
b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.agent.manager import AgentConfigManager @@ -84,7 +85,7 @@ class TestAgentConfigManagerConvert: assert result.strategy.name == "CHAIN_OF_THOUGHT" - def test_convert_skips_disabled_tools(self, mocker, base_config): + def test_convert_skips_disabled_tools(self, mocker: MockerFixture, base_config): # Patch AgentEntity to bypass pydantic validation mock_agent_entity = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentEntity", @@ -128,7 +129,7 @@ class TestAgentConfigManagerConvert: mock_validate.assert_called_once() mock_agent_entity.assert_called_once() - def test_convert_tool_requires_minimum_keys(self, mocker, base_config): + def test_convert_tool_requires_minimum_keys(self, mocker: MockerFixture, base_config): mock_validate = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentToolEntity.model_validate", return_value=MagicMock(), diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py index a688e2a5c5..3a239eac0e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py @@ -2,6 +2,7 @@ import uuid from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.dataset.manager import DatasetConfigManager from core.entities.agent_entities import PlanningStrategy @@ -69,7 +70,7 @@ class TestDatasetConfigManagerConvert: assert result.dataset_ids == [valid_uuid] assert result.retrieve_config.query_variable == "query" - def test_convert_single_with_metadata_configs(self, valid_uuid, 
mocker): + def test_convert_single_with_metadata_configs(self, valid_uuid, mocker: MockerFixture): mock_retrieve_config = MagicMock() mock_entity = MagicMock() mock_entity.dataset_ids = [valid_uuid] @@ -258,7 +259,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_invalid_uuid(self, mocker): + def test_extract_invalid_uuid(self, mocker: MockerFixture): invalid_uuid = "not-a-uuid" config = { "agent_mode": { @@ -270,7 +271,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_dataset_not_exists(self, valid_uuid, mocker): + def test_extract_dataset_not_exists(self, valid_uuid, mocker: MockerFixture): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, @@ -292,7 +293,7 @@ class TestExtractDatasetConfig: class TestIsDatasetExists: - def test_dataset_exists_true(self, mocker, valid_uuid): + def test_dataset_exists_true(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "tenant1" mocker.patch( @@ -302,14 +303,14 @@ class TestIsDatasetExists: assert DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_not_found(self, mocker, valid_uuid): + def test_dataset_exists_false_when_not_found(self, mocker: MockerFixture, valid_uuid): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, ) assert not DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_tenant_mismatch(self, mocker, valid_uuid): + def test_dataset_exists_false_when_tenant_mismatch(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "other" 
mocker.patch( diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py index 186b4a501d..e5b581b6a0 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter from core.entities.model_entities import ModelStatus @@ -16,7 +17,7 @@ from graphon.model_runtime.entities.model_entities import ModelPropertyKey class TestModelConfigConverter: @pytest.fixture(autouse=True) - def patch_response_entity(self, mocker): + def patch_response_entity(self, mocker: MockerFixture): """ Patch ModelConfigWithCredentialsEntity to bypass Pydantic validation and return a simple namespace object instead. 
@@ -69,7 +70,7 @@ class TestModelConfigConverter: return bundle @pytest.fixture - def patch_provider_manager(self, mocker, mock_provider_bundle): + def patch_provider_manager(self, mocker: MockerFixture, mock_provider_bundle): mock_manager = MagicMock() mock_manager.get_provider_model_bundle.return_value = mock_provider_bundle mocker.patch( @@ -99,7 +100,7 @@ class TestModelConfigConverter: assert result.parameters == {"temperature": 0.7} assert result.stop == ["\n"] - def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_app_config.model.mode = None mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { @@ -116,7 +117,9 @@ class TestModelConfigConverter: result = ModelConfigConverter.convert(mock_app_config) assert result.mode == LLMMode.COMPLETION - def test_convert_mode_from_schema_invalid_fallback(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_invalid_fallback( + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture + ): mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { ModelPropertyKey.MODE: "invalid" } @@ -135,7 +138,7 @@ class TestModelConfigConverter: # Credential Errors # ============================= - def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_current_credentials.return_value = None mock_manager = MagicMock() @@ -152,7 +155,7 @@ class TestModelConfigConverter: # Provider Model Errors # ============================= - def test_convert_provider_model_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_provider_model_none_raises(self, 
mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_provider_model.return_value = None mock_manager = MagicMock() @@ -174,7 +177,7 @@ class TestModelConfigConverter: ], ) def test_convert_provider_model_status_errors( - self, mock_app_config, mock_provider_bundle, mocker, status, expected_exception + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture, status, expected_exception ): mock_provider = MagicMock() mock_provider.status = status @@ -194,7 +197,7 @@ class TestModelConfigConverter: # Schema Errors # ============================= - def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.model_type_instance.get_model_schema.return_value = None mock_manager = MagicMock() diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py index 68bca485bb..72e334004e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture # Target from core.app.app_config.easy_ui_based_app.model_config.manager import ModelConfigManager @@ -107,7 +108,9 @@ class TestModelConfigManager: # validate_and_set_defaults # ========================================================== - def test_validate_and_set_defaults_success(self, mocker, valid_config, provider_entities, valid_model_list): + def test_validate_and_set_defaults_success( + self, mocker: MockerFixture, valid_config, provider_entities, valid_model_list + ): self._patch_model_assembly( mocker, provider_entities=provider_entities, 
@@ -127,35 +130,37 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="object type"): ModelConfigManager.validate_and_set_defaults("tenant1", {"model": "invalid"}) - def test_validate_and_set_defaults_missing_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_invalid_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "invalid/provider", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_missing_name(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_name(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.name is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_empty_models(self, mocker, provider_entities): + def test_validate_and_set_defaults_empty_models(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with 
pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_model_name(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_invalid_model_name( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "invalid", "completion_params": {}}} self._patch_model_assembly( mocker, @@ -166,7 +171,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_default_mode_when_missing(self, mocker, provider_entities): + def test_validate_and_set_defaults_default_mode_when_missing(self, mocker: MockerFixture, provider_entities): model = MagicMock() model.model = "gpt-4" model.model_properties = {} @@ -178,7 +183,9 @@ class TestModelConfigManager: assert updated_config["model"]["mode"] == "completion" - def test_validate_and_set_defaults_missing_completion_params(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_missing_completion_params( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "gpt-4"}} self._patch_model_assembly( mocker, @@ -189,7 +196,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="completion_params is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_provider_without_slash_converted(self, mocker, valid_model_list): + def test_validate_and_set_defaults_provider_without_slash_converted(self, mocker: MockerFixture, valid_model_list): """ Covers branch where provider does not contain '/' and ModelProviderID conversion is triggered (line 64). 
diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py index fd49072cd5..3fd21ab22b 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.prompt_template.manager import ( PromptTemplateConfigManager, @@ -38,7 +39,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError, match="prompt_type is required"): PromptTemplateConfigManager.convert({}) - def test_convert_simple_prompt(self, mocker): + def test_convert_simple_prompt(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -56,7 +57,7 @@ class TestPromptTemplateConfigManagerConvert: assert result == "simple_entity" mock_prompt_entity_cls.assert_called_once_with(prompt_type="simple", simple_prompt_template="hello") - def test_convert_advanced_chat_valid(self, mocker): + def test_convert_advanced_chat_valid(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -97,7 +98,7 @@ class TestPromptTemplateConfigManagerConvert: {"text": "hi", "role": 123}, ], ) - def test_convert_advanced_invalid_message_fields(self, mocker, message): + def test_convert_advanced_invalid_message_fields(self, mocker: MockerFixture, message): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -114,7 +115,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError): PromptTemplateConfigManager.convert(config) - def 
test_convert_advanced_completion_with_roles(self, mocker): + def test_convert_advanced_completion_with_roles(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -154,7 +155,7 @@ class TestValidateAndSetDefaults: def setup_method(self): self.valid_model = {"mode": "chat"} - def _patch_prompt_type(self, mocker): + def _patch_prompt_type(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mocker.patch( @@ -163,7 +164,7 @@ class TestValidateAndSetDefaults: ) return mock_prompt_entity_cls - def test_default_prompt_type_set(self, mocker): + def test_default_prompt_type_set(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = {"model": self.valid_model} @@ -173,7 +174,7 @@ class TestValidateAndSetDefaults: assert result["prompt_type"] == "simple" assert isinstance(keys, list) - def test_invalid_prompt_type_raises(self, mocker): + def test_invalid_prompt_type_raises(self, mocker: MockerFixture): class InvalidEnum(DummyPromptType): def __iter__(self): return iter([DummyEnumValue("valid")]) @@ -191,7 +192,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_invalid_chat_prompt_config_type(self, mocker): + def test_invalid_chat_prompt_config_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -203,7 +204,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_simple_mode_invalid_pre_prompt_type(self, mocker): + def test_simple_mode_invalid_pre_prompt_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -215,7 +216,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): 
PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_requires_one_config(self, mocker): + def test_advanced_requires_one_config(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -228,7 +229,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_invalid_model_mode(self, mocker): + def test_advanced_invalid_model_mode(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -240,7 +241,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_chat_prompt_length_exceeds(self, mocker): + def test_advanced_chat_prompt_length_exceeds(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -252,7 +253,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_completion_prefix_defaults_set_when_empty(self, mocker): + def test_completion_prefix_defaults_set_when_empty(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py index d9fe7004ff..b82417cfed 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.variables.manager import ( BasicVariablesConfigManager, @@ -15,7 +16,7 @@ class TestBasicVariablesConfigManagerConvert: assert variables == [] assert external == [] - def 
test_convert_external_data_tools_enabled_and_disabled(self, mocker): + def test_convert_external_data_tools_enabled_and_disabled(self, mocker: MockerFixture): config = { "external_data_tools": [ {"enabled": False}, @@ -232,7 +233,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_disabled_tool_skipped(self, mocker): + def test_validate_disabled_tool_skipped(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": False}]} spy = mocker.patch( @@ -250,7 +251,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_enabled_tool_calls_factory(self, mocker): + def test_validate_enabled_tool_calls_factory(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": True, "type": "tool", "config": {"a": 1}}]} spy = mocker.patch( @@ -263,7 +264,7 @@ class TestValidateExternalDataToolsAndSetDefaults: class TestValidateAndSetDefaultsIntegration: - def test_validate_and_set_defaults_calls_both(self, mocker): + def test_validate_and_set_defaults_calls_both(self, mocker: MockerFixture): config = {} spy_var = mocker.patch.object( diff --git a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py index e99852cf76..e2ab3e2192 100644 --- a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py @@ -2,6 +2,7 @@ from collections import UserDict from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.base_app_config_manager import BaseAppConfigManager @@ -12,7 +13,7 @@ class TestBaseAppConfigManager: return {"key": 
"value", "another": 123} @pytest.fixture - def mock_app_additional_features(self, mocker): + def mock_app_additional_features(self, mocker: MockerFixture): mock_instance = MagicMock() mocker.patch( "core.app.app_config.base_app_config_manager.AppAdditionalFeatures", @@ -21,7 +22,7 @@ class TestBaseAppConfigManager: return mock_instance @pytest.fixture - def mock_managers(self, mocker): + def mock_managers(self, mocker: MockerFixture): retrieval = mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", return_value="retrieval_result", @@ -72,7 +73,7 @@ class TestBaseAppConfigManager: ) def test_convert_features_all_modes( self, - mocker, + mocker: MockerFixture, mock_config_dict, mock_app_additional_features, mock_managers, @@ -107,7 +108,7 @@ class TestBaseAppConfigManager: mock_managers["speech_to_text"].assert_called_once_with(config=dict(mock_config_dict.items())) mock_managers["text_to_speech"].assert_called_once_with(config=dict(mock_config_dict.items())) - def test_convert_features_empty_config(self, mocker, mock_app_additional_features, mock_managers): + def test_convert_features_empty_config(self, mocker: MockerFixture, mock_app_additional_features, mock_managers): # Arrange empty_config = {} mock_app_mode = MagicMock() @@ -143,7 +144,7 @@ class TestBaseAppConfigManager: with pytest.raises((TypeError, AttributeError)): BaseAppConfigManager.convert_features(invalid_config, "CHAT") - def test_convert_features_manager_exception_propagates(self, mocker, mock_config_dict): + def test_convert_features_manager_exception_propagates(self, mocker: MockerFixture, mock_config_dict): # Arrange mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", @@ -154,7 +155,9 @@ class TestBaseAppConfigManager: with pytest.raises(RuntimeError): BaseAppConfigManager.convert_features(mock_config_dict, "CHAT") - def test_convert_features_mapping_subclass(self, mocker, mock_app_additional_features, 
mock_managers): + def test_convert_features_mapping_subclass( + self, mocker: MockerFixture, mock_app_additional_features, mock_managers + ): # Arrange class CustomMapping(UserDict): pass diff --git a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py index fa128aca87..dacd69a578 100644 --- a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py +++ b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.workflow_ui_based_app.variables.manager import ( WorkflowVariablesConfigManager, @@ -10,19 +11,19 @@ from core.app.app_config.workflow_ui_based_app.variables.manager import ( @pytest.fixture -def mock_workflow(mocker): +def mock_workflow(mocker: MockerFixture): workflow = mocker.MagicMock() workflow.graph_dict = {"nodes": []} return workflow @pytest.fixture -def mock_variable_entity(mocker): +def mock_variable_entity(mocker: MockerFixture): return mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.VariableEntity") @pytest.fixture -def mock_rag_entity(mocker): +def mock_rag_entity(mocker: MockerFixture): return mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.RagPipelineVariableEntity") diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py index af5d203f12..bc3b06cd1b 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py @@ -111,7 +111,7 @@ class TestAdvancedChatAppGeneratorInternals: workflow_id="workflow-id", ) - def test_generate_loads_conversation_and_files(self, 
monkeypatch): + def test_generate_loads_conversation_and_files(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() @@ -195,7 +195,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["application_generate_entity"].files == built_files assert build_files_called["called"] is True - def test_resume_delegates_to_generate(self, monkeypatch): + def test_resume_delegates_to_generate(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() application_generate_entity = AdvancedChatAppGenerateEntity.model_construct( task_id="task", @@ -235,7 +235,7 @@ class TestAdvancedChatAppGeneratorInternals: assert result == {"resumed": True} assert captured["graph_runtime_state"] is not None - def test_single_iteration_generate_builds_debug_task(self, monkeypatch): + def test_single_iteration_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -293,7 +293,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert captured["application_generate_entity"].single_iteration_run.node_id == "node-1" - def test_single_loop_generate_builds_debug_task(self, monkeypatch): + def test_single_loop_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -351,7 +351,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert captured["application_generate_entity"].single_loop_run.node_id == "node-2" - def test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch): + def test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 
0 app_config = self._build_app_config() @@ -449,7 +449,7 @@ class TestAdvancedChatAppGeneratorInternals: assert isinstance(captured["conversation"], ConversationSnapshot) assert isinstance(captured["message"], MessageSnapshot) - def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch): + def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 0 app_config = self._build_app_config() @@ -535,7 +535,7 @@ class TestAdvancedChatAppGeneratorInternals: db_session.refresh.assert_not_called() db_session.close.assert_called_once() - def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch): + def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -594,7 +594,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch): + def test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -658,7 +658,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_handles_stopped_error(self, monkeypatch): + def test_generate_worker_handles_stopped_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -732,7 +732,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_not_called() - def test_generate_worker_handles_validation_error(self, monkeypatch): + def test_generate_worker_handles_validation_error(self, monkeypatch: pytest.MonkeyPatch): generator 
= AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -816,7 +816,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch): + def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch: pytest.MonkeyPatch): app_config = self._build_app_config() @contextmanager @@ -897,7 +897,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -953,7 +953,7 @@ class TestAdvancedChatAppGeneratorInternals: stream=False, ) - def test_handle_response_re_raises_value_error(self, monkeypatch): + def test_handle_response_re_raises_value_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -1002,7 +1002,7 @@ class TestAdvancedChatAppGeneratorInternals: logger_exception.assert_called_once() - def test_generate_worker_handles_invoke_auth_error(self, monkeypatch): + def test_generate_worker_handles_invoke_auth_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -1088,7 +1088,7 @@ class TestAdvancedChatAppGeneratorInternals: assert queue_manager.publish_error.called - def test_generate_debugger_enables_retrieve_source(self, monkeypatch): + def test_generate_debugger_enables_retrieve_source(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -1167,7 +1167,7 @@ class TestAdvancedChatAppGeneratorInternals: assert app_config.additional_features.show_retrieve_source is True assert 
captured["application_generate_entity"].query == "hello" - def test_generate_service_api_sets_parent_message_id(self, monkeypatch): + def test_generate_service_api_sets_parent_message_id(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py index 64bcfa9a18..d8f794b483 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py @@ -132,7 +132,9 @@ class TestAdvancedChatGenerateTaskPipeline: pipeline._task_state.answer = "partial answer" pipeline._workflow_run_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=build_test_variable_pool( + variables=build_system_variables(workflow_execution_id="run-id"), + ), start_at=0.0, total_tokens=7, node_run_steps=3, @@ -224,7 +226,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -368,11 +370,13 @@ class TestAdvancedChatGenerateTaskPipeline: assert list(pipeline._handle_loop_next_event(loop_next)) == ["loop_next"] assert list(pipeline._handle_loop_completed_event(loop_done)) == ["loop_done"] - def test_workflow_finish_handlers(self, monkeypatch): + def test_workflow_finish_handlers(self, monkeypatch: pytest.MonkeyPatch): pipeline = 
_make_pipeline() pipeline._workflow_run_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._workflow_response_converter.workflow_finish_to_stream_response = lambda **kwargs: "finish" @@ -583,7 +587,9 @@ class TestAdvancedChatGenerateTaskPipeline: self.items = items graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) @@ -593,7 +599,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert message.answer == "hello" assert message.message_metadata - def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch): + def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._message_end_to_stream_response = lambda: "end" saved: list[str] = [] @@ -614,10 +620,12 @@ class TestAdvancedChatGenerateTaskPipeline: assert responses == ["end"] assert saved == ["saved"] - def test_handle_message_end_event_applies_output_moderation(self, monkeypatch): + def test_handle_message_end_event_applies_output_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._base_task_pipeline.handle_output_moderation_when_task_finished = lambda answer: "safe" diff --git 
a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py index a871e8d93b..d47b70e950 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py @@ -2,6 +2,7 @@ import uuid from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.agent_chat.app_config_manager import ( @@ -11,7 +12,7 @@ from core.entities.agent_entities import PlanningStrategy class TestAgentChatAppConfigManagerGetAppConfig: - def test_get_app_config_override_config(self, mocker): + def test_get_app_config_override_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"ignored": True} @@ -45,7 +46,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.variables == "variables" assert result.external_data_variables == "external" - def test_get_app_config_conversation_specific(self, mocker): + def test_get_app_config_conversation_specific(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -76,7 +77,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.app_model_config_dict == app_model_config.to_dict.return_value assert result.app_model_config_from.value == "conversation-specific-config" - def test_get_app_config_latest_config(self, mocker): + def test_get_app_config_latest_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") 
app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -107,7 +108,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: class TestAgentChatAppConfigManagerConfigValidate: - def test_config_validate_filters_related_keys(self, mocker): + def test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {}, "user_input_form": {}, @@ -247,7 +248,7 @@ class TestValidateAgentModeAndSetDefaults: {"agent_mode": {"enabled": True, "tools": [{"dataset": {"enabled": True, "id": "bad"}}]}}, ) - def test_old_tool_dataset_id_not_exists(self, mocker): + def test_old_tool_dataset_id_not_exists(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", return_value=False, @@ -275,7 +276,7 @@ class TestValidateAgentModeAndSetDefaults: "tenant", {"agent_mode": {"enabled": True, "tools": [tool]}} ) - def test_valid_old_and_new_style_tools(self, mocker): + def test_valid_old_and_new_style_tools(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", return_value=True, diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py index 80f7f94b1a..6cd62c933a 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py @@ -2,6 +2,7 @@ import contextlib import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.app.apps.agent_chat.app_generator import AgentChatAppGenerator from core.app.apps.exc import GenerateTaskStoppedError @@ -16,7 +17,7 @@ class DummyAccount: @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = AgentChatAppGenerator() mocker.patch( 
"core.app.apps.agent_chat.app_generator.current_app", @@ -27,19 +28,19 @@ def generator(mocker): class TestAgentChatAppGeneratorGenerate: - def test_generate_rejects_blocking_mode(self, generator, mocker): + def test_generate_rejects_blocking_mode(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={}, invoke_from=mocker.MagicMock(), streaming=False) - def test_generate_requires_query(self, generator, mocker): + def test_generate_requires_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={"inputs": {}}, invoke_from=mocker.MagicMock()) - def test_generate_rejects_non_string_query(self, generator, mocker): + def test_generate_rejects_non_string_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): @@ -50,7 +51,7 @@ class TestAgentChatAppGeneratorGenerate: invoke_from=mocker.MagicMock(), ) - def test_generate_override_requires_debugger(self, generator, mocker): + def test_generate_override_requires_debugger(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") @@ -62,7 +63,7 @@ class TestAgentChatAppGeneratorGenerate: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_success_with_debugger_override(self, generator, mocker): + def test_generate_success_with_debugger_override(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -142,7 +143,7 @@ class TestAgentChatAppGeneratorGenerate: assert result == {"result": "ok"} thread_obj.start.assert_called_once() - def 
test_generate_without_file_config(self, generator, mocker): + def test_generate_without_file_config(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -213,14 +214,14 @@ class TestAgentChatAppGeneratorGenerate: class TestAgentChatAppGeneratorWorker: @pytest.fixture(autouse=True) - def patch_context(self, mocker): + def patch_context(self, mocker: MockerFixture): @contextlib.contextmanager def ctx_manager(*args, **kwargs): yield mocker.patch("core.app.apps.agent_chat.app_generator.preserve_flask_contexts", ctx_manager) - def test_generate_worker_handles_generate_task_stopped(self, generator, mocker): + def test_generate_worker_handles_generate_task_stopped(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -250,7 +251,7 @@ class TestAgentChatAppGeneratorWorker: Exception("bad"), ], ) - def test_generate_worker_publishes_errors(self, generator, mocker, error): + def test_generate_worker_publishes_errors(self, generator, mocker: MockerFixture, error): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -271,7 +272,7 @@ class TestAgentChatAppGeneratorWorker: assert queue_manager.publish_error.called - def test_generate_worker_logs_value_error_when_debug(self, generator, mocker): + def test_generate_worker_logs_value_error_when_debug(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) diff 
--git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py index 4567b35480..0260235b03 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.agent.entities import AgentEntity from core.app.apps.agent_chat.app_runner import AgentChatAppRunner @@ -13,7 +14,7 @@ def runner(): class TestAgentChatAppRunnerRun: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, runner, mocker: MockerFixture): app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", agent=mocker.MagicMock()) generate_entity = mocker.MagicMock(app_config=app_config, inputs={}, query="q", files=[], stream=True) @@ -22,7 +23,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_moderation_error_direct_output(self, runner, mocker): + def test_run_moderation_error_direct_output(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -45,7 +46,7 @@ class TestAgentChatAppRunnerRun: runner.direct_output.assert_called_once() - def test_run_annotation_reply_short_circuits(self, runner, mocker): + def test_run_annotation_reply_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -74,7 +75,7 @@ class TestAgentChatAppRunnerRun: queue_manager.publish.assert_called_once() 
runner.direct_output.assert_called_once() - def test_run_hosting_moderation_short_circuits(self, runner, mocker): + def test_run_hosting_moderation_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -98,7 +99,7 @@ class TestAgentChatAppRunnerRun: runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_model_schema_missing(self, runner, mocker): + def test_run_model_schema_missing(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -140,7 +141,7 @@ class TestAgentChatAppRunnerRun: (LLMMode.COMPLETION, "CotCompletionAgentRunner"), ], ) - def test_run_chain_of_thought_modes(self, runner, mocker, mode, expected_runner): + def test_run_chain_of_thought_modes(self, runner, mocker: MockerFixture, mode, expected_runner): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -196,7 +197,7 @@ class TestAgentChatAppRunnerRun: runner_instance.run.assert_called_once() runner._handle_invoke_result.assert_called_once() - def test_run_invalid_llm_mode_raises(self, runner, mocker): + def test_run_invalid_llm_mode_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", 
strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -242,7 +243,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), conversation, message) - def test_run_function_calling_strategy_selected_by_features(self, runner, mocker): + def test_run_function_calling_strategy_selected_by_features(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -298,7 +299,7 @@ class TestAgentChatAppRunnerRun: assert app_config.agent.strategy == AgentEntity.Strategy.FUNCTION_CALLING runner_instance.run.assert_called_once() - def test_run_conversation_not_found(self, runner, mocker): + def test_run_conversation_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -332,7 +333,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) - def test_run_message_not_found(self, runner, mocker): + def test_run_message_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -366,7 +367,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) 
- def test_run_invalid_agent_strategy_raises(self, runner, mocker): + def test_run_invalid_agent_strategy_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock(strategy="invalid", provider="p", model="m") diff --git a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py index aa2085177e..8dcf6e9193 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.completion.app_runner as module from core.app.apps.completion.app_runner import CompletionAppRunner @@ -47,7 +48,7 @@ def _build_generate_entity(app_config, file_upload_config=None): class TestCompletionAppRunner: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, runner, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -58,7 +59,7 @@ class TestCompletionAppRunner: with pytest.raises(ValueError): runner.run(app_generate_entity, MagicMock(), MagicMock()) - def test_run_moderation_error_outputs_direct(self, runner, mocker): + def test_run_moderation_error_outputs_direct(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -78,7 +79,7 @@ class TestCompletionAppRunner: runner.direct_output.assert_called_once() runner._handle_invoke_result.assert_not_called() - def test_run_hosting_moderation_stops(self, runner, mocker): + def test_run_hosting_moderation_stops(self, runner, mocker: MockerFixture): app_record = 
MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -97,7 +98,7 @@ class TestCompletionAppRunner: runner._handle_invoke_result.assert_not_called() - def test_run_dataset_and_external_tools_flow(self, runner, mocker): + def test_run_dataset_and_external_tools_flow(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -140,7 +141,7 @@ class TestCompletionAppRunner: assert dataset_retrieval.retrieve.call_args.kwargs["query"] == "query_from_input" runner._handle_invoke_result.assert_called_once() - def test_run_uses_low_image_detail_default(self, runner, mocker): + def test_run_uses_low_image_detail_default(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py index 024bd8f302..353162be8c 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py @@ -1,6 +1,8 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + import core.app.apps.completion.app_config_manager as module from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.completion.app_config_manager import CompletionAppConfigManager @@ -8,7 +10,7 @@ from models.model import AppMode class TestCompletionAppConfigManager: - def test_get_app_config_with_override(self, mocker): + def test_get_app_config_with_override(self, mocker: MockerFixture): app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -35,8 +37,8 @@ class 
TestCompletionAppConfigManager: assert result.external_data_variables == ["ext1"] assert result.app_mode == AppMode.COMPLETION - def test_get_app_config_without_override_uses_model_config(self, mocker): - app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) + def test_get_app_config_without_override_uses_model_config(self, mocker: MockerFixture): + app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -53,7 +55,7 @@ class TestCompletionAppConfigManager: assert result.app_model_config_from == EasyUIBasedAppModelConfigFrom.APP_LATEST_CONFIG assert result.app_model_config_dict == {"model": {"provider": "x"}} - def test_config_validate_filters_related_keys(self, mocker): + def test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {"provider": "x"}, "variables": ["v"], diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py index f2e35f9900..de20dde677 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture import core.app.apps.completion.app_generator as module from core.app.apps.completion.app_generator import CompletionAppGenerator @@ -15,7 +16,7 @@ from services.errors.message import MessageNotExistsError @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = CompletionAppGenerator() mocker.patch.object(module, "copy_current_request_context", side_effect=lambda fn: fn) @@ -69,7 +70,7 @@ class TestCompletionAppGenerator: 
streaming=False, ) - def test_generate_success_no_file_config(self, generator, mocker): + def test_generate_success_no_file_config(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) mocker.patch.object(module.FileUploadConfigManager, "convert", return_value=None) @@ -99,7 +100,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_not_called() - def test_generate_success_with_files(self, generator, mocker): + def test_generate_success_with_files(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -131,7 +132,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_called_once() - def test_generate_override_model_config_debugger(self, generator, mocker): + def test_generate_override_model_config_debugger(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -165,7 +166,7 @@ class TestCompletionAppGenerator: assert get_app_config.call_args.kwargs["override_config_dict"] == override_config - def test_generate_more_like_this_message_not_found(self, generator, mocker): + def test_generate_more_like_this_message_not_found(self, generator, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -178,7 +179,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_disabled(self, generator, mocker): + def test_generate_more_like_this_disabled(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = 
MagicMock(more_like_this=False, more_like_this_dict={"enabled": False}) @@ -195,7 +196,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_app_model_config_missing(self, generator, mocker): + def test_generate_more_like_this_app_model_config_missing(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = None @@ -212,7 +213,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_message_config_none(self, generator, mocker): + def test_generate_more_like_this_message_config_none(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -229,7 +230,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_success(self, generator, mocker): + def test_generate_more_like_this_success(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -297,7 +298,7 @@ class TestCompletionAppGenerator: (RuntimeError("boom"), True), ], ) - def test_generate_worker_error_handling(self, generator, mocker, error, should_publish): + def test_generate_worker_error_handling(self, generator, mocker: MockerFixture, error, should_publish): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py index 5d4c9bcde0..6c1ee20ffb 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py @@ -1,12 +1,14 @@ from types import SimpleNamespace from unittest.mock import 
MagicMock +from pytest_mock import MockerFixture + import core.app.apps.pipeline.pipeline_config_manager as module from core.app.apps.pipeline.pipeline_config_manager import PipelineConfigManager from models.model import AppMode -def test_get_pipeline_config(mocker): +def test_get_pipeline_config(mocker: MockerFixture): pipeline = MagicMock(tenant_id="tenant", id="pipe1") workflow = MagicMock(id="wf1") @@ -26,7 +28,7 @@ def test_get_pipeline_config(mocker): assert result.rag_pipeline_variables == ["var1"] -def test_config_validate_filters_related_keys(mocker): +def test_config_validate_filters_related_keys(mocker: MockerFixture): config = { "file_upload": {"enabled": True}, "tts": {"enabled": True}, diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py index c36edf48fc..dd91243a37 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py @@ -3,6 +3,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, PropertyMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_generator as module from core.app.apps.exc import GenerateTaskStoppedError @@ -23,7 +24,7 @@ class FakeRagPipelineGenerateEntity(SimpleNamespace): @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = module.PipelineGenerator() mocker.patch.object(module, "RagPipelineGenerateEntity", FakeRagPipelineGenerateEntity) @@ -88,7 +89,7 @@ class DummySession: return False -def test_generate_dataset_missing(generator, mocker): +def test_generate_dataset_missing(generator, mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -106,7 +107,7 @@ def test_generate_dataset_missing(generator, mocker): ) -def test_generate_debugger_calls_generate(generator, mocker): +def 
test_generate_debugger_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -150,7 +151,7 @@ def test_generate_debugger_calls_generate(generator, mocker): assert result == {"result": "ok"} -def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker): +def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -228,7 +229,7 @@ def test_generate_published_pipeline_creates_documents_and_delay(generator, mock task_proxy.delay.assert_called_once() -def test_generate_is_retry_calls_generate(generator, mocker): +def test_generate_is_retry_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -273,7 +274,7 @@ def test_generate_is_retry_calls_generate(generator, mocker): assert result == {"result": "ok"} -def test_generate_worker_handles_errors(generator, mocker): +def test_generate_worker_handles_errors(generator, mocker: MockerFixture): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -308,7 +309,7 @@ def test_generate_worker_handles_errors(generator, mocker): queue_manager.publish_error.assert_called_once() -def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker): +def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker: MockerFixture): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -341,7 +342,7 @@ def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker assert module.PipelineRunner.call_args.kwargs["system_user_id"] == "session" -def test_generate_raises_when_workflow_not_found(generator, mocker): +def 
test_generate_raises_when_workflow_not_found(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -369,7 +370,7 @@ def test_generate_raises_when_workflow_not_found(generator, mocker): ) -def test_generate_success_returns_converted(generator, mocker): +def test_generate_success_returns_converted(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -409,7 +410,7 @@ def test_generate_success_returns_converted(generator, mocker): assert result == "converted" -def test_single_iteration_generate_validates_inputs(generator, mocker): +def test_single_iteration_generate_validates_inputs(generator, mocker: MockerFixture): with pytest.raises(ValueError): generator.single_iteration_generate(_build_pipeline(), _build_workflow(), "", _build_user(), {}) @@ -419,7 +420,7 @@ def test_single_iteration_generate_validates_inputs(generator, mocker): ) -def test_single_iteration_generate_dataset_required(generator, mocker): +def test_single_iteration_generate_dataset_required(generator, mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -436,7 +437,7 @@ def test_single_iteration_generate_dataset_required(generator, mocker): ) -def test_single_iteration_generate_success(generator, mocker): +def test_single_iteration_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -476,7 +477,7 @@ def test_single_iteration_generate_success(generator, mocker): assert result == {"ok": True} -def test_single_loop_generate_success(generator, mocker): +def test_single_loop_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -516,7 +517,7 @@ def test_single_loop_generate_success(generator, mocker): assert result == {"ok": True} -def 
test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker): +def test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() app_entity = FakeRagPipelineGenerateEntity(task_id="t") @@ -536,7 +537,7 @@ def test_handle_response_value_error_triggers_generate_task_stopped(generator, m ) -def test_build_document_sets_metadata_for_builtin_fields(generator, mocker): +def test_build_document_sets_metadata_for_builtin_fields(generator, mocker: MockerFixture): class DummyDocument(SimpleNamespace): pass @@ -620,7 +621,7 @@ def test_format_datasource_info_list_missing_node_data(generator): ) -def test_format_datasource_info_list_online_drive_folder(generator, mocker): +def test_format_datasource_info_list_online_drive_folder(generator, mocker: MockerFixture): workflow = MagicMock( graph_dict={ "nodes": [ diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py index 9db83f5531..abfc76afa0 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_queue_manager as module from core.app.apps.base_app_queue_manager import PublishFrom @@ -16,7 +17,7 @@ from core.app.entities.queue_entities import ( from graphon.model_runtime.entities.llm_entities import LLMResult -def test_publish_sets_stop_listen_and_raises_on_stopped(mocker): +def test_publish_sets_stop_listen_and_raises_on_stopped(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -28,7 +29,7 @@ def 
test_publish_sets_stop_listen_and_raises_on_stopped(mocker): manager.stop_listen.assert_called_once() -def test_publish_stop_events_trigger_stop_listen(mocker): +def test_publish_stop_events_trigger_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -46,7 +47,7 @@ def test_publish_stop_events_trigger_stop_listen(mocker): manager.stop_listen.assert_called_once() -def test_publish_non_stop_event_no_stop_listen(mocker): +def test_publish_non_stop_event_no_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py index 603062a51c..1eed76cf84 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py @@ -22,6 +22,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_runner as module from core.app.apps.pipeline.pipeline_runner import PipelineRunner @@ -126,7 +127,7 @@ def test_update_document_status_on_failure(mocker, runner): session.commit.assert_called_once() -def test_run_pipeline_not_found(mocker): +def test_run_pipeline_not_found(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.invoke_from = InvokeFrom.WEB_APP app_generate_entity.single_iteration_run = None @@ -150,7 +151,7 @@ def test_run_pipeline_not_found(mocker): runner.run() -def test_run_workflow_not_initialized(mocker): +def test_run_workflow_not_initialized(mocker: MockerFixture): 
app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") @@ -174,7 +175,7 @@ def test_run_workflow_not_initialized(mocker): runner.run() -def test_run_single_iteration_path(mocker): +def test_run_single_iteration_path(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.single_iteration_run = MagicMock() @@ -223,7 +224,7 @@ def test_run_single_iteration_path(mocker): runner._handle_event.assert_called() -def test_run_normal_path_builds_graph(mocker): +def test_run_normal_path_builds_graph(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") diff --git a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py index f48a7fb38e..835c9a8576 100644 --- a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py @@ -45,7 +45,7 @@ def _make_generate_entity(app_config: WorkflowUIBasedAppConfig) -> AdvancedChatA @pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -108,7 +108,7 @@ def test_init_generate_records_marks_existing_conversation(): assert entity.is_new_conversation is False -def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch): +def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch: pytest.MonkeyPatch): app_config = _make_app_config() entity = _make_generate_entity(app_config) entity.conversation_id = "existing-conversation-id" diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py index b0f8b423e1..f2a1700664 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py +++ 
b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py @@ -369,7 +369,7 @@ def test_validate_inputs_optional_file_with_empty_string_ignores_default(): class TestBaseAppGeneratorExtras: - def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch): + def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch: pytest.MonkeyPatch): base_app_generator = BaseAppGenerator() variables = [ diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py index 17de39ca99..c6eedf7be7 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py @@ -42,7 +42,7 @@ class _QueueRecorder: class TestAppRunner: - def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch): + def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -65,7 +65,7 @@ class TestAppRunner: assert model_config.parameters["max_tokens"] == 20 - def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, monkeypatch): + def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -86,7 +86,7 @@ class TestAppRunner: assert runner.recalc_llm_max_tokens(model_config, prompt_messages=[]) == -1 - def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch): + def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() app_generate_entity = SimpleNamespace(model_conf=SimpleNamespace(model="mock"), stream=True) @@ -133,7 +133,7 @@ class TestAppRunner: stream=True, ) - def test_organize_prompt_messages_simple_template(self, monkeypatch): + def test_organize_prompt_messages_simple_template(self, monkeypatch: pytest.MonkeyPatch): runner = 
AppRunner() model_config = SimpleNamespace(mode="chat", stop=["STOP"]) prompt_template_entity = PromptTemplateEntity( @@ -158,7 +158,7 @@ class TestAppRunner: assert prompt_messages == ["simple-message"] assert stop == ["simple-stop"] - def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="completion", stop=[""]) captured: dict[str, object] = {} @@ -191,7 +191,7 @@ class TestAppRunner: assert memory_config.role_prefix.user == "U" assert memory_config.role_prefix.assistant == "A" - def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="chat", stop=[""]) captured: dict[str, object] = {} @@ -245,7 +245,7 @@ class TestAppRunner: files=[], ) - def test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch): + def test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() warning_logger = MagicMock() @@ -284,7 +284,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.message.content == "abc" warning_logger.assert_called_once() - def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch): + def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() exception_logger = MagicMock() @@ -331,7 +331,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.usage == usage exception_logger.assert_called_once() - def test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch): + def 
test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() class _ToggleBool: @@ -367,7 +367,7 @@ class TestAppRunner: db_session.add.assert_not_called() queue_manager.publish.assert_not_called() - def test_check_hosting_moderation_direct_output_called(self, monkeypatch): + def test_check_hosting_moderation_direct_output_called(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() app_generate_entity = SimpleNamespace(stream=False) @@ -388,7 +388,7 @@ class TestAppRunner: assert result is True assert direct_output.called - def test_fill_in_inputs_from_external_data_tools(self, monkeypatch): + def test_fill_in_inputs_from_external_data_tools(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.ExternalDataFetch.fetch", @@ -405,7 +405,7 @@ class TestAppRunner: assert result == {"foo": "bar"} - def test_moderation_for_inputs_returns_result(self, monkeypatch): + def test_moderation_for_inputs_returns_result(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.InputModeration.check", @@ -424,7 +424,7 @@ class TestAppRunner: assert result == (True, {}, "") - def test_query_app_annotations_to_reply(self, monkeypatch): + def test_query_app_annotations_to_reply(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.AnnotationReplyFeature.query", diff --git a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py index 1250ac5ecf..6a9b5e7619 100644 --- a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py @@ -85,7 +85,7 @@ def _make_chat_generate_entity(app_config: EasyUIBasedAppConfig) -> ChatAppGener 
@pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -130,7 +130,7 @@ def test_init_generate_records_sets_conversation_fields_for_chat_entity(): class TestMessageBasedAppGeneratorExtras: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = MessageBasedAppGenerator() class _Pipeline: @@ -155,7 +155,7 @@ class TestMessageBasedAppGeneratorExtras: stream=False, ) - def test_get_app_model_config_requires_valid_config(self, monkeypatch): + def test_get_app_model_config_requires_valid_config(self, monkeypatch: pytest.MonkeyPatch): generator = MessageBasedAppGenerator() app_model = SimpleNamespace(id="app", app_model_config_id=None, app_model_config=None) diff --git a/api/tests/unit_tests/core/app/apps/test_pause_resume.py b/api/tests/unit_tests/core/app/apps/test_pause_resume.py index 6104b8d6ca..1acebfee17 100644 --- a/api/tests/unit_tests/core/app/apps/test_pause_resume.py +++ b/api/tests/unit_tests/core/app/apps/test_pause_resume.py @@ -3,6 +3,8 @@ import time from types import ModuleType, SimpleNamespace from typing import Any +from pytest_mock import MockerFixture + import graphon.nodes.human_input.entities # noqa: F401 from core.app.apps.advanced_chat import app_generator as adv_app_gen_module from core.app.apps.workflow import app_generator as wf_app_gen_module @@ -58,7 +60,7 @@ class _StubToolNode(Node[_StubToolNodeData]): def __init__( self, node_id: str, - config: _StubToolNodeData, + data: _StubToolNodeData, *, graph_init_params, graph_runtime_state, @@ -66,7 +68,7 @@ class _StubToolNode(Node[_StubToolNodeData]): ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) @@ -101,7 +103,7 @@ class 
_StubToolNode(Node[_StubToolNodeData]): yield self._convert_node_run_result_to_graph_node_event(result) -def _patch_tool_node(mocker): +def _patch_tool_node(mocker: MockerFixture): original_resolve_node_class = node_factory_module.resolve_workflow_node_class def _patched_resolve_node_class(*, node_type: NodeType, node_version: str) -> type[Node]: @@ -167,7 +169,7 @@ def _build_graph(runtime_state: GraphRuntimeState, *, pause_on: str | None) -> G def _build_runtime_state(run_id: str) -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="user", app_id="app", workflow_id="workflow"), user_inputs={}, conversation_variables=[], @@ -196,7 +198,7 @@ def _node_successes(events: list[GraphEngineEvent]) -> list[str]: return [evt.node_id for evt in events if isinstance(evt, NodeRunSucceededEvent)] -def test_workflow_app_pause_resume_matches_baseline(mocker): +def test_workflow_app_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("baseline") @@ -236,7 +238,7 @@ def test_workflow_app_pause_resume_matches_baseline(mocker): assert resumed_state.outputs == baseline_outputs -def test_advanced_chat_pause_resume_matches_baseline(mocker): +def test_advanced_chat_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("adv-baseline") diff --git a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py index 58f0e47a4b..12f3ed9f07 100644 --- a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py +++ b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py @@ -54,7 +54,7 @@ class FakeTopic: return self._state["subscribed"] -def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch): +def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch: 
pytest.MonkeyPatch): topic = FakeTopic() def fake_get_response_topic(cls, app_mode, workflow_run_id): @@ -92,7 +92,7 @@ def test_normalize_terminal_events_empty_values(): assert _normalize_terminal_events([]) == set({}) -def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch): +def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch: pytest.MonkeyPatch): topic = FakeTopic() times = [1000.0, 1000.0, 1001.0, 1001.0, 1002.0] diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py index 7e8367c6c4..0e9f8b6f35 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py @@ -1,6 +1,8 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + from core.app.apps.workflow.app_generator import SKIP_PREPARE_USER_INPUTS_KEY, WorkflowAppGenerator @@ -22,7 +24,7 @@ def test_should_prepare_user_inputs_keeps_validation_when_flag_false(): assert WorkflowAppGenerator()._should_prepare_user_inputs(args) -def test_resume_delegates_to_generate(mocker): +def test_resume_delegates_to_generate(mocker: MockerFixture): generator = WorkflowAppGenerator() mock_generate = mocker.patch.object(generator, "_generate", return_value="ok") @@ -52,7 +54,7 @@ def test_resume_delegates_to_generate(mocker): assert kwargs["invoke_from"] == "debugger" -def test_generate_appends_pause_layer_and_forwards_state(mocker): +def test_generate_appends_pause_layer_and_forwards_state(mocker: MockerFixture): generator = WorkflowAppGenerator() mock_queue_manager = MagicMock() @@ -124,7 +126,7 @@ def test_generate_appends_pause_layer_and_forwards_state(mocker): assert worker_kwargs["kwargs"]["graph_runtime_state"] is graph_runtime_state -def test_resume_path_runs_worker_with_runtime_state(mocker): +def test_resume_path_runs_worker_with_runtime_state(mocker: 
MockerFixture): generator = WorkflowAppGenerator() runtime_state = MagicMock(name="runtime-state") diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py index 58c7bfa4bc..3949c41eae 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py @@ -54,7 +54,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app") runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) @@ -90,10 +90,10 @@ class TestWorkflowBasedAppRunner: with pytest.raises(ValueError, match="Neither single_iteration_run nor single_loop_run"): runner._prepare_single_node_execution(workflow, None, None, user_id="00000000-0000-0000-0000-000000000001") - def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch): + def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch: pytest.MonkeyPatch): runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) @@ -142,7 +142,9 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool is graph_runtime_state.variable_pool - def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init(self, monkeypatch): + def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init( + self, monkeypatch: pytest.MonkeyPatch + ): variable_loader = SimpleNamespace( load_variables=lambda selectors: ( [ @@ -162,7 +164,7 @@ class 
TestWorkflowBasedAppRunner: app_id="app", ) graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) @@ -232,7 +234,7 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool.get(["sys", "conversation_id"]).value == "conv-1" - def test_handle_graph_run_events_and_pause_notifications(self, monkeypatch): + def test_handle_graph_run_events_and_pause_notifications(self, monkeypatch: pytest.MonkeyPatch): published: list[object] = [] class _QueueManager: @@ -241,7 +243,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) graph_runtime_state.register_paused_node("node-1") @@ -284,7 +286,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) workflow_entry = SimpleNamespace(graph_engine=SimpleNamespace(graph_runtime_state=graph_runtime_state)) @@ -423,7 +425,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) workflow_entry = SimpleNamespace(graph_engine=SimpleNamespace(graph_runtime_state=graph_runtime_state)) diff --git 
a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py index 620a153204..248fed5388 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py @@ -16,7 +16,7 @@ from models.workflow import Workflow def _make_graph_state(): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, environment_variables=[], diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py index 09ad078a70..320189143e 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py @@ -67,7 +67,7 @@ class TestWorkflowAppGeneratorValidation: class TestWorkflowAppGeneratorHandleResponse: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -116,7 +116,7 @@ class TestWorkflowAppGeneratorHandleResponse: class TestWorkflowAppGeneratorGenerate: - def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch): + def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py index 0bcc1029b0..ea21a1cc1a 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py +++ 
b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py @@ -95,7 +95,9 @@ class TestWorkflowGenerateTaskPipeline: def test_to_blocking_response_falls_back_to_human_input_required_when_pause_event_missing(self): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=build_test_variable_pool( + variables=build_system_variables(workflow_execution_id="run-id"), + ), start_at=0.0, total_tokens=5, node_run_steps=2, @@ -187,7 +189,7 @@ class TestWorkflowGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -283,7 +285,9 @@ class TestWorkflowGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_execution_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._workflow_response_converter.workflow_finish_to_stream_response = lambda **kwargs: "finish" @@ -408,7 +412,7 @@ class TestWorkflowGenerateTaskPipeline: assert list(pipeline._handle_human_input_form_timeout_event(timeout_event)) == ["timeout"] assert list(pipeline._handle_agent_log_event(agent_event)) == ["log"] - def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch): + def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() 
pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -560,7 +564,7 @@ class TestWorkflowGenerateTaskPipeline: responses = list(pipeline._wrapper_process_stream_response()) assert responses == [PingStreamResponse(task_id="task")] - def test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch): + def test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -597,7 +601,7 @@ class TestWorkflowGenerateTaskPipeline: assert sleep_spy assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch): + def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -633,7 +637,7 @@ class TestWorkflowGenerateTaskPipeline: assert logger_exception assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_database_session_rolls_back_on_error(self, monkeypatch): + def test_database_session_rolls_back_on_error(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() calls = {"enter": 0, "exit_exc": None} @@ -725,7 +729,9 @@ class TestWorkflowGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_execution_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) @@ -753,7 
+759,9 @@ class TestWorkflowGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_execution_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._handle_ping_event = lambda event, **kwargs: iter(["ping"]) @@ -769,7 +777,9 @@ class TestWorkflowGenerateTaskPipeline: def test_process_stream_response_main_match_paths_and_cleanup(self): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._base_task_pipeline.queue_manager.listen = lambda: iter( diff --git a/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py b/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py index d3bd15b6f3..320a3bc42c 100644 --- a/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py +++ b/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py @@ -21,7 +21,9 @@ class TestTriggerPostLayer: ) runtime_state = SimpleNamespace( outputs={"answer": "ok"}, - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-1")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-1") + ), total_tokens=12, ) @@ -60,7 +62,9 @@ class TestTriggerPostLayer: def test_on_event_handles_missing_trigger_log(self): runtime_state = SimpleNamespace( outputs={}, - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-1")), + variable_pool=VariablePool.from_bootstrap( + 
system_variables=build_system_variables(workflow_execution_id="run-1") + ), total_tokens=0, ) @@ -91,7 +95,9 @@ class TestTriggerPostLayer: def test_on_event_ignores_non_status_events(self): runtime_state = SimpleNamespace( outputs={}, - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-1")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-1") + ), total_tokens=0, ) diff --git a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py index a20d89d807..f10e0084d0 100644 --- a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py @@ -143,7 +143,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert pipeline._listen_audio_msg(publisher=None, task_id="task") is None - def test_process_stream_response_handles_chunks_and_end(self, monkeypatch): + def test_process_stream_response_handles_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -245,7 +245,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(event, QueueLLMChunkEvent) for event in events) assert any(isinstance(event, QueueStopEvent) for event in events) - def test_handle_stop_updates_usage(self, monkeypatch): + def test_handle_stop_updates_usage(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -313,7 +313,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert pipeline._task_state.llm_result.usage.prompt_tokens == 10 assert 
pipeline._task_state.llm_result.usage.completion_tokens == 5 - def test_record_files_builds_file_payloads(self, monkeypatch): + def test_record_files_builds_file_payloads(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -405,7 +405,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert files assert len(files) == 3 - def test_process_stream_response_handles_annotation_and_error(self, monkeypatch): + def test_process_stream_response_handles_annotation_and_error(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -472,7 +472,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert isinstance(responses[-1], ValueError) assert pipeline._task_state.llm_result.message.content == "annotatedagent" - def test_agent_thought_to_stream_response_returns_payload(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_payload(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -681,7 +681,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses == ["payload"] - def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch): + def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -715,7 +715,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses[1] == "payload" assert isinstance(responses[-1], MessageAudioEndStreamResponse) - def test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch): + def 
test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -756,7 +756,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(item, MessageAudioStreamResponse) for item in responses) assert isinstance(responses[-1], MessageAudioEndStreamResponse) - def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch): + def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -896,7 +896,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert list(pipeline._process_stream_response(publisher=None)) == [] - def test_save_message_persists_fields_and_emits_trace(self, monkeypatch): + def test_save_message_persists_fields_and_emits_trace(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -981,7 +981,7 @@ class TestEasyUiBasedGenerateTaskPipeline: with pytest.raises(ValueError, match="Conversation conv not found"): pipeline._save_message(session=session) - def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch): + def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1021,7 +1021,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == "msg" assert 
response.metadata["usage"]["prompt_tokens"] == 1 - def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch): + def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1059,7 +1059,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.files is None - def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch): + def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1155,7 +1155,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == "msg" assert response.answer == "hello" - def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( diff --git a/api/tests/unit_tests/core/app/test_llm_quota.py b/api/tests/unit_tests/core/app/test_llm_quota.py new file mode 100644 index 0000000000..d9390a4a8f --- /dev/null +++ b/api/tests/unit_tests/core/app/test_llm_quota.py @@ -0,0 +1,617 @@ +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +import pytest +from sqlalchemy import create_engine, select + +from configs import dify_config +from core.app.llm.quota import ( + deduct_llm_quota, + deduct_llm_quota_for_model, + ensure_llm_quota_available, + ensure_llm_quota_available_for_model, +) +from core.entities.model_entities import 
ModelStatus +from core.entities.provider_entities import ProviderQuotaType, QuotaUnit +from core.errors.error import QuotaExceededError +from graphon.model_runtime.entities.llm_entities import LLMUsage +from graphon.model_runtime.entities.model_entities import ModelType +from models import TenantCreditPool +from models.enums import ProviderQuotaType as ModelProviderQuotaType +from models.provider import Provider, ProviderType + + +def test_ensure_llm_quota_available_for_model_raises_when_system_model_is_exhausted() -> None: + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + get_provider_model=MagicMock(return_value=SimpleNamespace(status=ModelStatus.QUOTA_EXCEEDED)), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + pytest.raises(QuotaExceededError, match="Model provider openai quota exceeded."), + ): + ensure_llm_quota_available_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + provider_configuration.get_provider_model.assert_called_once_with( + model_type=ModelType.LLM, + model="gpt-4o", + ) + + +def test_ensure_llm_quota_available_for_model_raises_when_provider_is_missing() -> None: + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = None + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + pytest.raises(ValueError, match="Provider openai does not exist."), + ): + ensure_llm_quota_available_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + +def test_ensure_llm_quota_available_for_model_ignores_custom_provider_configuration() -> None: + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.CUSTOM, + get_provider_model=MagicMock(), + ) + provider_manager 
= MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager): + ensure_llm_quota_available_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + provider_configuration.get_provider_model.assert_not_called() + + +def test_deduct_llm_quota_for_model_uses_identity_based_trial_billing() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 42 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=42, + ) + + +def test_deduct_llm_quota_for_model_caps_trial_pool_when_usage_exceeds_remaining() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 3 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = 
provider_configuration + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + with engine.begin() as connection: + connection.execute( + TenantCreditPool.__table__.insert(), + { + "id": "trial-pool", + "tenant_id": "tenant-id", + "pool_type": ModelProviderQuotaType.TRIAL, + "quota_limit": 10, + "quota_used": 9, + }, + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + quota_used = connection.scalar(select(TenantCreditPool.quota_used).where(TenantCreditPool.id == "trial-pool")) + + assert quota_used == 10 + + +def test_deduct_llm_quota_for_model_returns_for_unbounded_quota() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 42 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=-1, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_not_called() + + +def test_deduct_llm_quota_for_model_uses_credit_configuration() -> None: + usage = LLMUsage.empty_usage() + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + 
system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.CREDITS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch.object(type(dify_config), "get_model_credits", return_value=9) as mock_get_model_credits, + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_get_model_credits.assert_called_once_with("gpt-4o") + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=9, + ) + + +def test_deduct_llm_quota_for_model_uses_single_charge_for_times_quota() -> None: + usage = LLMUsage.empty_usage() + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TIMES, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=1, + ) + + +def 
test_deduct_llm_quota_for_model_uses_paid_billing_pool() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 5 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.PAID, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.PAID, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=5, + pool_type="paid", + ) + + +def test_deduct_llm_quota_for_model_updates_free_quota_usage() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 3 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.FREE, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.FREE, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + engine = create_engine("sqlite:///:memory:") + Provider.__table__.create(engine) + with engine.begin() as connection: + connection.execute( + Provider.__table__.insert(), + [ + { + "id": "matching-provider", + "tenant_id": "tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 10, + 
"is_valid": True, + }, + { + "id": "other-tenant", + "tenant_id": "other-tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 20, + "is_valid": True, + }, + { + "id": "other-provider", + "tenant_id": "tenant-id", + "provider_name": "anthropic", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 30, + "is_valid": True, + }, + { + "id": "custom-provider", + "tenant_id": "tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.CUSTOM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 40, + "is_valid": True, + }, + ], + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("core.app.llm.quota.db", SimpleNamespace(engine=engine)), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + quota_used_by_id = dict(connection.execute(select(Provider.id, Provider.quota_used)).all()) + + assert quota_used_by_id == { + "matching-provider": 13, + "other-tenant": 20, + "other-provider": 30, + "custom-provider": 40, + } + + with engine.begin() as connection: + connection.execute( + Provider.__table__.update().where(Provider.id == "matching-provider").values(quota_limit=13, quota_used=13) + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("core.app.llm.quota.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Model provider openai quota exceeded."), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + exhausted_quota_used = connection.scalar(select(Provider.quota_used).where(Provider.id == 
"matching-provider")) + + assert exhausted_quota_used == 13 + + +def test_deduct_llm_quota_for_model_caps_free_quota_and_raises_when_usage_exceeds_remaining() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 3 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.FREE, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.FREE, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + engine = create_engine("sqlite:///:memory:") + Provider.__table__.create(engine) + with engine.begin() as connection: + connection.execute( + Provider.__table__.insert(), + { + "id": "matching-provider", + "tenant_id": "tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 15, + "quota_used": 13, + "is_valid": True, + }, + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("core.app.llm.quota.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Model provider openai quota exceeded."), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + quota_used = connection.scalar(select(Provider.quota_used).where(Provider.id == "matching-provider")) + + assert quota_used == 15 + + +def test_deduct_llm_quota_for_model_ignores_unknown_quota_type() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 2 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type="unexpected", + quota_configurations=[ + SimpleNamespace( + 
quota_type="unexpected", + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + patch("core.app.llm.quota.sessionmaker") as mock_sessionmaker, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_not_called() + mock_sessionmaker.assert_not_called() + + +def test_deduct_llm_quota_for_model_ignores_custom_provider_configuration() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 2 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.CUSTOM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + patch("core.app.llm.quota.sessionmaker") as mock_sessionmaker, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_not_called() + mock_sessionmaker.assert_not_called() + + +def test_ensure_llm_quota_available_wrapper_warns_and_delegates() -> None: + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace(tenant_id="tenant-id")), + model_type_instance=SimpleNamespace(model_type=ModelType.LLM), + ) + + with ( + 
pytest.deprecated_call(match="ensure_llm_quota_available\\(model_instance=.*deprecated"), + patch("core.app.llm.quota.ensure_llm_quota_available_for_model") as mock_ensure, + ): + ensure_llm_quota_available(model_instance=model_instance) + + mock_ensure.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + +def test_ensure_llm_quota_available_wrapper_rejects_non_llm_model_instances() -> None: + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace(tenant_id="tenant-id")), + model_type_instance=SimpleNamespace(model_type=ModelType.TEXT_EMBEDDING), + ) + + with ( + pytest.deprecated_call(match="ensure_llm_quota_available\\(model_instance=.*deprecated"), + pytest.raises(ValueError, match="only support LLM model instances"), + ): + ensure_llm_quota_available(model_instance=model_instance) + + +def test_deduct_llm_quota_wrapper_warns_and_delegates() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 7 + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + model_type_instance=SimpleNamespace(model_type=ModelType.LLM), + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace()), + ) + + with ( + pytest.deprecated_call(match="deduct_llm_quota\\(tenant_id=.*deprecated"), + patch("core.app.llm.quota.deduct_llm_quota_for_model") as mock_deduct, + ): + deduct_llm_quota( + tenant_id="tenant-id", + model_instance=model_instance, + usage=usage, + ) + + mock_deduct.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + +def test_deduct_llm_quota_wrapper_rejects_non_llm_model_instances() -> None: + usage = LLMUsage.empty_usage() + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + model_type_instance=SimpleNamespace(model_type=ModelType.TEXT_EMBEDDING), + 
provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace()), + ) + + with ( + pytest.deprecated_call(match="deduct_llm_quota\\(tenant_id=.*deprecated"), + pytest.raises(ValueError, match="only support LLM model instances"), + ): + deduct_llm_quota( + tenant_id="tenant-id", + model_instance=model_instance, + usage=usage, + ) diff --git a/api/tests/unit_tests/core/app/workflow/test_node_factory.py b/api/tests/unit_tests/core/app/workflow/test_node_factory.py index 30a068f4c5..addce649d5 100644 --- a/api/tests/unit_tests/core/app/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/app/workflow/test_node_factory.py @@ -8,9 +8,9 @@ from graphon.enums import BuiltinNodeTypes class DummyNode: - def __init__(self, *, node_id, config, graph_init_params, graph_runtime_state, **kwargs): + def __init__(self, *, node_id, data, graph_init_params, graph_runtime_state, **kwargs): self.id = node_id - self.config = config + self.data = data self.graph_init_params = graph_init_params self.graph_runtime_state = graph_runtime_state self.kwargs = kwargs @@ -46,7 +46,7 @@ class TestDifyNodeFactory: lambda **_kwargs: node_class, ) - def _factory(self, monkeypatch): + def _factory(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_STRING_LENGTH", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_NUMBER", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MIN_NUMBER", -10) @@ -72,20 +72,20 @@ class TestDifyNodeFactory: graph_runtime_state=SimpleNamespace(), ) - def test_create_node_unknown_type(self, monkeypatch): + def test_create_node_unknown_type(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": "unknown"}}) - def test_create_node_missing_mapping(self, monkeypatch): + def test_create_node_missing_mapping(self, monkeypatch: pytest.MonkeyPatch): factory = 
self._factory(monkeypatch) monkeypatch.setattr("core.workflow.node_factory.get_node_type_classes_mapping", lambda: {}) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_missing_latest_class(self, monkeypatch): + def test_create_node_missing_latest_class(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) monkeypatch.setattr( "core.workflow.node_factory.get_node_type_classes_mapping", @@ -96,7 +96,7 @@ class TestDifyNodeFactory: with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_selects_versioned_class(self, monkeypatch): + def test_create_node_selects_versioned_class(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) selected_versions: list[tuple[str, str]] = [] @@ -115,7 +115,7 @@ class TestDifyNodeFactory: assert node.id == "node-1" assert selected_versions == [("snapshot", "called")] - def test_create_node_code_branch(self, monkeypatch): + def test_create_node_code_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyCodeNode) @@ -124,7 +124,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyCodeNode) assert node.id == "node-1" - def test_create_node_template_transform_branch(self, monkeypatch): + def test_create_node_template_transform_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyTemplateTransformNode) @@ -133,7 +133,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyTemplateTransformNode) assert "jinja2_template_renderer" in node.kwargs - def test_create_node_http_request_branch(self, monkeypatch): + def test_create_node_http_request_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, 
DummyHttpRequestNode) @@ -142,7 +142,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyHttpRequestNode) assert "http_request_config" in node.kwargs - def test_create_node_knowledge_retrieval_branch(self, monkeypatch): + def test_create_node_knowledge_retrieval_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyKnowledgeRetrievalNode) @@ -151,7 +151,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyKnowledgeRetrievalNode) assert node.kwargs == {} - def test_create_node_document_extractor_branch(self, monkeypatch): + def test_create_node_document_extractor_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyDocumentExtractorNode) diff --git a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py index 82552470a9..04ce524904 100644 --- a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py +++ b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py @@ -2,12 +2,14 @@ from __future__ import annotations from types import SimpleNamespace +import pytest + from core.app.workflow.layers.observability import ObservabilityLayer from graphon.enums import BuiltinNodeTypes class TestObservabilityLayerExtras: - def test_init_tracer_enabled_sets_tracer(self, monkeypatch): + def test_init_tracer_enabled_sets_tracer(self, monkeypatch: pytest.MonkeyPatch): tracer = object() monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -18,7 +20,7 @@ class TestObservabilityLayerExtras: assert layer._is_disabled is False assert layer._tracer is tracer - def test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch, caplog): + def 
test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch: pytest.MonkeyPatch, caplog): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -33,7 +35,7 @@ class TestObservabilityLayerExtras: assert layer._tracer is None assert "Failed to get OpenTelemetry tracer" in caplog.text - def test_init_tracer_disables_when_otel_disabled(self, monkeypatch): + def test_init_tracer_disables_when_otel_disabled(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", False) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -143,7 +145,7 @@ class TestObservabilityLayerExtras: assert layer._node_contexts == {} - def test_on_node_run_end_calls_span_end(self, monkeypatch): + def test_on_node_run_end_calls_span_end(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False ended: list[str] = [] @@ -164,7 +166,7 @@ class TestObservabilityLayerExtras: assert ended == ["ended"] assert "exec" not in layer._node_contexts - def test_on_node_run_end_logs_detach_failure(self, monkeypatch, caplog): + def test_on_node_run_end_logs_detach_failure(self, monkeypatch: pytest.MonkeyPatch, caplog): layer = ObservabilityLayer() layer._is_disabled = False @@ -186,7 +188,7 @@ class TestObservabilityLayerExtras: assert "Failed to detach OpenTelemetry token" in caplog.text assert "exec" not in layer._node_contexts - def test_on_node_run_start_and_end_creates_span(self, monkeypatch): + def test_on_node_run_start_and_end_creates_span(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False diff --git a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py index 
cacb4dd4fa..7e87c088ce 100644 --- a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py +++ b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py @@ -60,7 +60,10 @@ def _make_layer( workflow_execution_id="run-id", conversation_id="conv-id", ) - runtime_state = GraphRuntimeState(variable_pool=VariablePool(system_variables=system_variables), start_at=0.0) + runtime_state = GraphRuntimeState( + variable_pool=VariablePool.from_bootstrap(system_variables=system_variables), + start_at=0.0, + ) read_only_state = ReadOnlyGraphRuntimeStateWrapper(runtime_state) application_generate_entity = WorkflowAppGenerateEntity.model_construct( @@ -120,7 +123,7 @@ class TestWorkflowPersistenceLayer: with pytest.raises(ValueError, match="workflow_execution_id must be provided"): layer._get_execution_id() - def test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch): + def test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch: pytest.MonkeyPatch): layer, _, _, _ = _make_layer() monkeypatch.setattr( diff --git a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py index 7b433ab57b..1125ce6dbc 100644 --- a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py +++ b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py @@ -3,6 +3,7 @@ import queue from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.base.tts.app_generator_tts_publisher import ( AppGeneratorTTSPublisher, @@ -17,7 +18,7 @@ from core.base.tts.app_generator_tts_publisher import ( @pytest.fixture -def mock_model_instance(mocker): +def mock_model_instance(mocker: MockerFixture): model = mocker.MagicMock() model.invoke_tts.return_value = [b"audio1", b"audio2"] model.get_tts_voices.return_value = [{"value": "voice1"}, {"value": "voice2"}] @@ -33,7 +34,7 @@ def mock_model_manager(mocker, mock_model_instance): 
@pytest.fixture(autouse=True) -def patch_threads(mocker): +def patch_threads(mocker: MockerFixture): """Prevent real threads from starting during tests""" mocker.patch("threading.Thread.start", return_value=None) @@ -114,7 +115,7 @@ class TestProcessFuture: finish = audio_queue.get() assert finish.status == "finish" - def test_process_future_exception(self, mocker): + def test_process_future_exception(self, mocker: MockerFixture): future_queue = queue.Queue() audio_queue = queue.Queue() @@ -222,7 +223,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker): + def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -297,7 +298,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -332,7 +333,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "Hello " - def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -358,7 +359,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "" - def test_runtime_resets_msg_text_when_text_tmp_not_str(self, mock_model_manager, mocker): + def test_runtime_resets_msg_text_when_text_tmp_not_str(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", 
"voice1") publisher.executor = MagicMock() diff --git a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py index 4c1aa33540..f9b3b1864e 100644 --- a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py @@ -1,8 +1,10 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.callback_handler.agent_tool_callback_handler as module +from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler # ----------------------------- # Fixtures @@ -10,17 +12,17 @@ import core.callback_handler.agent_tool_callback_handler as module @pytest.fixture -def enable_debug(mocker): +def enable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", True) @pytest.fixture -def disable_debug(mocker): +def disable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", False) @pytest.fixture -def mock_print(mocker): +def mock_print(mocker: MockerFixture): return mocker.patch("builtins.print") @@ -71,7 +73,7 @@ class TestPrintText: module.print_text("hello") mock_print.assert_called_once_with("hello", end="", file=None) - def test_print_text_with_color(self, mocker, mock_print): + def test_print_text_with_color(self, mocker: MockerFixture, mock_print): mock_get_color = mocker.patch( "core.callback_handler.agent_tool_callback_handler.get_colored_text", return_value="colored_text", @@ -82,7 +84,7 @@ class TestPrintText: mock_get_color.assert_called_once_with("hello", "green") mock_print.assert_called_once_with("colored_text", end="", file=None) - def test_print_text_with_file_flush(self, mocker): + def test_print_text_with_file_flush(self, mocker: MockerFixture): mock_file = MagicMock() mock_print = mocker.patch("builtins.print") @@ -107,21 +109,25 @@ class 
TestDifyAgentCallbackHandler: assert handler.color == "green" assert handler.current_loop == 1 - def test_on_tool_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_start_debug_enabled(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_called() - def test_on_tool_start_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_start_debug_disabled( + self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_not_called() - def test_on_tool_end_debug_enabled_and_trace(self, handler, enable_debug, mocker): + def test_on_tool_end_debug_enabled_and_trace( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") mock_trace_manager = MagicMock() @@ -137,7 +143,9 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 mock_trace_manager.add_trace_task.assert_called_once() - def test_on_tool_end_without_trace_manager(self, handler, enable_debug, mocker): + def test_on_tool_end_without_trace_manager( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_end( @@ -148,14 +156,16 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 - def test_on_tool_error_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_error_debug_enabled(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture): mock_print_text = 
mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) mock_print_text.assert_called_once() - def test_on_tool_error_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_error_debug_disabled( + self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) @@ -163,14 +173,16 @@ class TestDifyAgentCallbackHandler: mock_print_text.assert_not_called() @pytest.mark.parametrize("thought", ["thinking", ""]) - def test_on_agent_start(self, handler, enable_debug, mocker, thought): + def test_on_agent_start(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture, thought): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_agent_start(thought) mock_print_text.assert_called() - def test_on_agent_finish_increments_loop(self, handler, enable_debug, mocker): + def test_on_agent_finish_increments_loop( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") current_loop = handler.current_loop @@ -179,19 +191,21 @@ class TestDifyAgentCallbackHandler: assert handler.current_loop == current_loop + 1 mock_print_text.assert_called() - def test_on_datasource_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_datasource_start_debug_enabled( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_datasource_start("ds1", {"x": 1}) mock_print_text.assert_called_once() - def test_ignore_agent_property(self, disable_debug, handler): + def test_ignore_agent_property(self, disable_debug, handler: 
DifyAgentCallbackHandler): assert handler.ignore_agent is True - def test_ignore_chat_model_property(self, disable_debug, handler): + def test_ignore_chat_model_property(self, disable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_chat_model is True - def test_ignore_properties_when_debug_enabled(self, enable_debug, handler): + def test_ignore_properties_when_debug_enabled(self, enable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_agent is False assert handler.ignore_chat_model is False diff --git a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py index 8e5670e9be..f23669c3c7 100644 --- a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom from core.callback_handler.index_tool_callback_handler import ( @@ -7,12 +8,12 @@ from core.callback_handler.index_tool_callback_handler import ( @pytest.fixture -def mock_queue_manager(mocker): +def mock_queue_manager(mocker: MockerFixture): return mocker.Mock() @pytest.fixture -def handler(mock_queue_manager, mocker): +def handler(mock_queue_manager, mocker: MockerFixture): mocker.patch( "core.callback_handler.index_tool_callback_handler.db", ) @@ -34,7 +35,7 @@ class TestOnQuery: (InvokeFrom.WEB_APP, "end_user"), ], ) - def test_on_query_success_roles(self, mocker, mock_queue_manager, invoke_from, expected_role): + def test_on_query_success_roles(self, mocker: MockerFixture, mock_queue_manager, invoke_from, expected_role): # Arrange mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") @@ -57,7 +58,7 @@ class TestOnQuery: assert dataset_query.created_by_role == expected_role mock_db.session.commit.assert_called_once() - 
def test_on_query_none_values(self, mocker, mock_queue_manager): + def test_on_query_none_values(self, mocker: MockerFixture, mock_queue_manager): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") handler = DatasetIndexToolCallbackHandler( @@ -75,7 +76,7 @@ class TestOnQuery: class TestOnToolEnd: - def test_on_tool_end_no_metadata(self, handler, mocker): + def test_on_tool_end_no_metadata(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") document = mocker.Mock() @@ -85,7 +86,9 @@ class TestOnToolEnd: mock_db.session.commit.assert_not_called() - def test_on_tool_end_dataset_document_not_found(self, handler, mocker): + def test_on_tool_end_dataset_document_not_found( + self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_db.session.scalar.return_value = None @@ -96,7 +99,9 @@ class TestOnToolEnd: mock_db.session.scalar.assert_called_once() - def test_on_tool_end_parent_child_index_with_child(self, handler, mocker): + def test_on_tool_end_parent_child_index_with_child( + self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -119,7 +124,7 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() mock_db.session.commit.assert_called_once() - def test_on_tool_end_non_parent_child_index(self, handler, mocker): + def test_on_tool_end_non_parent_child_index(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -139,12 +144,12 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() mock_db.session.commit.assert_called_once() - def 
test_on_tool_end_empty_documents(self, handler): + def test_on_tool_end_empty_documents(self, handler: DatasetIndexToolCallbackHandler): handler.on_tool_end([]) class TestReturnRetrieverResourceInfo: - def test_publish_called(self, handler, mock_queue_manager, mocker): + def test_publish_called(self, handler: DatasetIndexToolCallbackHandler, mock_queue_manager, mocker: MockerFixture): mock_event = mocker.patch("core.callback_handler.index_tool_callback_handler.QueueRetrieverResourcesEvent") resources = [mocker.Mock()] diff --git a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py index 131fb006ed..5b53c5965c 100644 --- a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, call import pytest +from pytest_mock import MockerFixture from core.callback_handler.workflow_tool_callback_handler import ( DifyWorkflowCallbackHandler, @@ -26,13 +27,13 @@ def handler(): @pytest.fixture -def mock_print_text(mocker): +def mock_print_text(mocker: MockerFixture): """Mock print_text to avoid real stdout printing.""" return mocker.patch("core.callback_handler.workflow_tool_callback_handler.print_text") class TestDifyWorkflowCallbackHandler: - def test_on_tool_execution_single_output_success(self, handler, mock_print_text): + def test_on_tool_execution_single_output_success(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "test_tool" tool_inputs = {"a": 1} @@ -62,7 +63,7 @@ class TestDifyWorkflowCallbackHandler: ] ) - def test_on_tool_execution_multiple_outputs(self, handler, mock_print_text): + def test_on_tool_execution_multiple_outputs(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "multi_tool" outputs = [ @@ -83,7 +84,7 @@ class 
TestDifyWorkflowCallbackHandler: assert results == outputs assert mock_print_text.call_count == 4 * len(outputs) - def test_on_tool_execution_empty_iterable(self, handler, mock_print_text): + def test_on_tool_execution_empty_iterable(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "empty_tool" @@ -108,7 +109,9 @@ class TestDifyWorkflowCallbackHandler: ("not_iterable", AttributeError), ], ) - def test_on_tool_execution_invalid_outputs_type(self, handler, invalid_outputs, expected_exception): + def test_on_tool_execution_invalid_outputs_type( + self, handler: DifyWorkflowCallbackHandler, invalid_outputs, expected_exception + ): # Arrange tool_name = "invalid_tool" @@ -122,7 +125,7 @@ class TestDifyWorkflowCallbackHandler: ) ) - def test_on_tool_execution_long_json_truncation(self, handler, mock_print_text): + def test_on_tool_execution_long_json_truncation(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "long_json_tool" long_json = "x" * 1500 @@ -144,7 +147,7 @@ class TestDifyWorkflowCallbackHandler: color="blue", ) - def test_on_tool_execution_model_dump_json_exception(self, handler, mock_print_text): + def test_on_tool_execution_model_dump_json_exception(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "exception_tool" bad_message = MagicMock() @@ -163,7 +166,9 @@ class TestDifyWorkflowCallbackHandler: # Ensure first two prints happened before failure assert mock_print_text.call_count >= 2 - def test_on_tool_execution_none_message_id_and_trace_manager(self, handler, mock_print_text): + def test_on_tool_execution_none_message_id_and_trace_manager( + self, handler: DifyWorkflowCallbackHandler, mock_print_text + ): # Arrange tool_name = "optional_params_tool" message = DummyToolInvokeMessage('{"data": "ok"}') diff --git a/api/tests/unit_tests/core/datasource/test_datasource_manager.py b/api/tests/unit_tests/core/datasource/test_datasource_manager.py index 
deeac49bbc..8842d678c7 100644 --- a/api/tests/unit_tests/core/datasource/test_datasource_manager.py +++ b/api/tests/unit_tests/core/datasource/test_datasource_manager.py @@ -2,6 +2,7 @@ import types from collections.abc import Generator import pytest +from pytest_mock import MockerFixture from contexts.wrapper import RecyclableContextVar from core.datasource.datasource_manager import DatasourceManager @@ -37,7 +38,7 @@ def _invalidate_recyclable_contextvars() -> None: RecyclableContextVar.increment_thread_recycles() -def test_get_icon_url_calls_runtime(mocker): +def test_get_icon_url_calls_runtime(mocker: MockerFixture): fake_runtime = mocker.Mock() fake_runtime.get_icon_url.return_value = "https://icon" mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=fake_runtime) @@ -52,7 +53,7 @@ def test_get_icon_url_calls_runtime(mocker): DatasourceManager.get_datasource_runtime.assert_called_once() -def test_get_datasource_runtime_delegates_to_provider_controller(mocker): +def test_get_datasource_runtime_delegates_to_provider_controller(mocker: MockerFixture): provider_controller = mocker.Mock() provider_controller.get_datasource.return_value = object() mocker.patch.object(DatasourceManager, "get_datasource_plugin_provider", return_value=provider_controller) @@ -114,7 +115,7 @@ def test_get_datasource_plugin_provider_creates_controller_and_caches(mocker, da assert ctrl_cls.call_count == 1 -def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker): +def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker: MockerFixture): _invalidate_recyclable_contextvars() mocker.patch( "core.datasource.datasource_manager.PluginDatasourceManager.fetch_datasource_provider", @@ -129,7 +130,7 @@ def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mock ) -def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): +def 
test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -145,7 +146,7 @@ def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): ) -def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): +def test_get_datasource_plugin_provider_raises_when_controller_none(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -165,7 +166,7 @@ def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): ) -def test_stream_online_results_yields_messages_online_document(mocker): +def test_stream_online_results_yields_messages_online_document(mocker: MockerFixture): # stub runtime to yield a text message def _doc_messages(**_): yield from _gen_messages_text_only("hello") @@ -195,7 +196,7 @@ def test_stream_online_results_yields_messages_online_document(mocker): assert msgs[0].message.text == "hello" -def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker): +def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -229,7 +230,7 @@ def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_do assert final_value == {} -def test_stream_online_results_raises_when_missing_params(mocker): +def test_stream_online_results_raises_when_missing_params(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -279,7 +280,7 @@ def test_stream_online_results_raises_when_missing_params(mocker): ) -def 
test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker): +def test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -313,7 +314,7 @@ def test_stream_online_results_yields_messages_and_returns_empty_dict_online_dri assert final_value == {} -def test_stream_online_results_raises_for_unsupported_stream_type(mocker): +def test_stream_online_results_raises_for_unsupported_stream_type(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=mocker.Mock()) mocker.patch( "core.datasource.datasource_manager.DatasourceProviderService.get_datasource_credentials", @@ -337,7 +338,7 @@ def test_stream_online_results_raises_for_unsupported_stream_type(mocker): ) -def test_stream_node_events_emits_events_online_document(mocker): +def test_stream_node_events_emits_events_online_document(mocker: MockerFixture): # make manager's low-level stream produce TEXT only mocker.patch.object( DatasourceManager, @@ -370,7 +371,7 @@ def test_stream_node_events_emits_events_online_document(mocker): assert events[-1].node_run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED -def test_stream_node_events_builds_file_and_variables_from_messages(mocker): +def test_stream_node_events_builds_file_and_variables_from_messages(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -478,7 +479,7 @@ def test_stream_node_events_builds_file_and_variables_from_messages(mocker): assert events[-1].node_run_result.outputs["x"] == 1 -def test_stream_node_events_raises_when_toolfile_missing(mocker): +def test_stream_node_events_raises_when_toolfile_missing(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", 
return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -526,7 +527,7 @@ def test_stream_node_events_raises_when_toolfile_missing(mocker): ) -def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker): +def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) file_in = File( @@ -580,7 +581,7 @@ def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(moc assert completed.node_run_result.outputs["datasource_type"] == DatasourceProviderType.ONLINE_DRIVE -def test_stream_node_events_skips_file_build_for_non_online_types(mocker): +def test_stream_node_events_skips_file_build_for_non_online_types(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -620,7 +621,7 @@ def test_stream_node_events_skips_file_build_for_non_online_types(mocker): assert events[-1].node_run_result.outputs["file"] is None -def test_get_upload_file_by_id_builds_file(mocker): +def test_get_upload_file_by_id_builds_file(mocker: MockerFixture): # fake UploadFile row fake_row = types.SimpleNamespace( id="fid", @@ -654,7 +655,7 @@ def test_get_upload_file_by_id_builds_file(mocker): assert f.storage_key == "k" -def test_get_upload_file_by_id_raises_when_missing(mocker): +def test_get_upload_file_by_id_raises_when_missing(mocker: MockerFixture): class _S: def __enter__(self): return self diff --git a/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py b/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py index a28143026f..1b714d6830 100644 --- a/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py +++ b/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py @@ -354,7 +354,8 @@ def 
test_validate_provider_credentials_handles_hidden_secret_value() -> None: with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.decrypt_token", return_value="restored-key"): with patch( @@ -379,7 +380,10 @@ def test_validate_provider_credentials_without_credential_id() -> None: mock_factory = Mock() mock_factory.provider_credentials_validate.return_value = {"region": "us"} - with patch("core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory): + with patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), + ): validated = configuration.validate_provider_credentials(credentials={"region": "us"}) assert validated == {"region": "us"} @@ -426,23 +430,37 @@ def test_switch_preferred_provider_type_creates_record_when_missing() -> None: def test_get_model_type_instance_and_schema_delegate_to_factory() -> None: configuration = _build_provider_configuration() - mock_factory = Mock() mock_model_type_instance = Mock() mock_schema = _build_ai_model("gpt-4o") - mock_factory.get_model_type_instance.return_value = mock_model_type_instance + mock_factory = Mock() + mock_factory.get_provider_schema.return_value = configuration.provider mock_factory.get_model_schema.return_value = mock_schema + mock_assembly = Mock() + mock_assembly.model_runtime = Mock() + mock_assembly.model_provider_factory = mock_factory - with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", - return_value=mock_factory, - ) as mock_factory_builder: + with ( + patch( + 
"core.entities.provider_configuration.create_plugin_model_assembly", + return_value=mock_assembly, + ) as mock_assembly_builder, + patch( + "core.entities.provider_configuration.create_model_type_instance", + return_value=mock_model_type_instance, + ) as mock_model_builder, + ): model_type_instance = configuration.get_model_type_instance(ModelType.LLM) model_schema = configuration.get_model_schema(ModelType.LLM, "gpt-4o", {"api_key": "x"}) assert model_type_instance is mock_model_type_instance assert model_schema is mock_schema - assert mock_factory_builder.call_count == 2 - mock_factory.get_model_type_instance.assert_called_once_with(provider="openai", model_type=ModelType.LLM) + assert mock_assembly_builder.call_count == 2 + mock_factory.get_provider_schema.assert_called_once_with(provider="openai") + mock_model_builder.assert_called_once_with( + runtime=mock_assembly.model_runtime, + provider_schema=configuration.provider, + model_type=ModelType.LLM, + ) mock_factory.get_model_schema.assert_called_once_with( provider="openai", model_type=ModelType.LLM, @@ -456,17 +474,21 @@ def test_get_model_type_instance_and_schema_reuse_bound_runtime_factory() -> Non bound_runtime = Mock() configuration.bind_model_runtime(bound_runtime) - mock_factory = Mock() mock_model_type_instance = Mock() mock_schema = _build_ai_model("gpt-4o") - mock_factory.get_model_type_instance.return_value = mock_model_type_instance + mock_factory = Mock() + mock_factory.get_provider_schema.return_value = configuration.provider mock_factory.get_model_schema.return_value = mock_schema with ( patch( "core.entities.provider_configuration.ModelProviderFactory", return_value=mock_factory ) as mock_factory_cls, - patch("core.entities.provider_configuration.create_plugin_model_provider_factory") as mock_factory_builder, + patch("core.entities.provider_configuration.create_plugin_model_assembly") as mock_assembly_builder, + patch( + "core.entities.provider_configuration.create_model_type_instance", + 
return_value=mock_model_type_instance, + ) as mock_model_builder, ): model_type_instance = configuration.get_model_type_instance(ModelType.LLM) model_schema = configuration.get_model_schema(ModelType.LLM, "gpt-4o", {"api_key": "x"}) @@ -474,8 +496,14 @@ def test_get_model_type_instance_and_schema_reuse_bound_runtime_factory() -> Non assert model_type_instance is mock_model_type_instance assert model_schema is mock_schema assert mock_factory_cls.call_count == 2 - mock_factory_cls.assert_called_with(model_runtime=bound_runtime) - mock_factory_builder.assert_not_called() + mock_factory_cls.assert_called_with(runtime=bound_runtime) + mock_assembly_builder.assert_not_called() + mock_factory.get_provider_schema.assert_called_once_with(provider="openai") + mock_model_builder.assert_called_once_with( + runtime=bound_runtime, + provider_schema=configuration.provider, + model_type=ModelType.LLM, + ) def test_get_provider_model_returns_none_when_model_not_found() -> None: @@ -504,7 +532,10 @@ def test_get_provider_models_system_deduplicates_sorts_and_filters_active() -> N mock_factory = Mock() mock_factory.get_provider_schema.return_value = provider_schema - with patch("core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory): + with patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), + ): all_models = configuration.get_provider_models(model_type=ModelType.LLM, only_active=False) active_models = configuration.get_provider_models(model_type=ModelType.LLM, only_active=True) @@ -722,7 +753,8 @@ def test_validate_provider_credentials_handles_invalid_original_json() -> None: with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + 
return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-key"): validated = configuration.validate_provider_credentials( @@ -1069,7 +1101,8 @@ def test_validate_custom_model_credentials_supports_hidden_reuse_and_sessionless with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.decrypt_token", return_value="raw"): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-new"): @@ -1083,7 +1116,10 @@ def test_validate_custom_model_credentials_supports_hidden_reuse_and_sessionless mock_factory2 = Mock() mock_factory2.model_credentials_validate.return_value = {"region": "us"} - with patch("core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory2): + with patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory2), + ): validated = configuration.validate_custom_model_credentials( model_type=ModelType.LLM, model="gpt-4o", @@ -1575,7 +1611,8 @@ def test_validate_provider_credentials_uses_empty_original_when_record_missing() with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-new"): 
validated = configuration.validate_provider_credentials( @@ -1701,7 +1738,8 @@ def test_validate_custom_model_credentials_handles_invalid_original_json() -> No with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-new"): validated = configuration.validate_custom_model_credentials( diff --git a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py index 399b531205..9c1cbe82a0 100644 --- a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py +++ b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py @@ -1,11 +1,12 @@ import httpx import pytest +from pytest_mock import MockerFixture from core.extension.api_based_extension_requestor import APIBasedExtensionRequestor from models.api_based_extension import APIBasedExtensionPoint -def test_request_success(mocker): +def test_request_success(mocker: MockerFixture): # Mock httpx.Client and its context manager mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value @@ -28,7 +29,7 @@ def test_request_success(mocker): ) -def test_request_with_ssrf_proxy(mocker): +def test_request_with_ssrf_proxy(mocker: MockerFixture): # Mock dify_config mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", "https://proxy:8081") @@ -59,7 +60,7 @@ def test_request_with_ssrf_proxy(mocker): assert mock_transport.call_count == 2 -def test_request_with_only_one_proxy_config(mocker): +def test_request_with_only_one_proxy_config(mocker: MockerFixture): 
# Mock dify_config with only one proxy mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", None) @@ -84,7 +85,7 @@ def test_request_with_only_one_proxy_config(mocker): assert kwargs.get("mounts") is None -def test_request_timeout(mocker): +def test_request_timeout(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -95,7 +96,7 @@ def test_request_timeout(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_connection_error(mocker): +def test_request_connection_error(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -106,7 +107,7 @@ def test_request_connection_error(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code(mocker): +def test_request_error_status_code(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -121,7 +122,7 @@ def test_request_error_status_code(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code_long_content(mocker): +def test_request_error_status_code_long_content(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) diff --git a/api/tests/unit_tests/core/helper/test_creators.py b/api/tests/unit_tests/core/helper/test_creators.py index df67d3f513..8750f6d907 100644 --- a/api/tests/unit_tests/core/helper/test_creators.py +++ b/api/tests/unit_tests/core/helper/test_creators.py @@ -8,7 +8,7 @@ from yarl import URL @pytest.fixture(autouse=True) -def _patch_creators_url(monkeypatch): +def 
_patch_creators_url(monkeypatch: pytest.MonkeyPatch): """Patch the module-level creators_platform_api_url for all tests.""" monkeypatch.setattr( "core.helper.creators.creators_platform_api_url", diff --git a/api/tests/unit_tests/core/helper/test_moderation.py b/api/tests/unit_tests/core/helper/test_moderation.py index a0dfa86d20..c33002329b 100644 --- a/api/tests/unit_tests/core/helper/test_moderation.py +++ b/api/tests/unit_tests/core/helper/test_moderation.py @@ -68,8 +68,8 @@ def test_check_moderation_returns_true_when_model_accepts_text(mocker: MockerFix mocker.patch("core.helper.moderation.secrets.choice", return_value="chunk") moderation_model = SimpleNamespace(invoke=lambda **invoke_kwargs: invoke_kwargs["text"] == "chunk") - factory = SimpleNamespace(get_model_type_instance=lambda **_factory_kwargs: moderation_model) - mocker.patch("core.helper.moderation.create_plugin_model_provider_factory", return_value=factory) + assembly = SimpleNamespace(create_model_type_instance=lambda **_factory_kwargs: moderation_model) + mocker.patch("core.helper.moderation.create_plugin_model_assembly", return_value=assembly) assert ( check_moderation( @@ -91,7 +91,7 @@ def test_check_moderation_returns_true_when_text_is_empty(mocker: MockerFixture) provider_map={openai_provider: hosting_openai}, ), ) - factory_mock = mocker.patch("core.helper.moderation.create_plugin_model_provider_factory") + factory_mock = mocker.patch("core.helper.moderation.create_plugin_model_assembly") choice_mock = mocker.patch("core.helper.moderation.secrets.choice") assert ( @@ -119,8 +119,8 @@ def test_check_moderation_returns_false_when_model_rejects_text(mocker: MockerFi mocker.patch("core.helper.moderation.secrets.choice", return_value="chunk") moderation_model = SimpleNamespace(invoke=lambda **_invoke_kwargs: False) - factory = SimpleNamespace(get_model_type_instance=lambda **_factory_kwargs: moderation_model) - mocker.patch("core.helper.moderation.create_plugin_model_provider_factory", 
return_value=factory) + assembly = SimpleNamespace(create_model_type_instance=lambda **_factory_kwargs: moderation_model) + mocker.patch("core.helper.moderation.create_plugin_model_assembly", return_value=assembly) assert ( check_moderation( @@ -147,8 +147,8 @@ def test_check_moderation_raises_bad_request_when_provider_call_fails(mocker: Mo failing_model = SimpleNamespace( invoke=lambda **_invoke_kwargs: (_ for _ in ()).throw(RuntimeError("boom")), ) - factory = SimpleNamespace(get_model_type_instance=lambda **_factory_kwargs: failing_model) - mocker.patch("core.helper.moderation.create_plugin_model_provider_factory", return_value=factory) + assembly = SimpleNamespace(create_model_type_instance=lambda **_factory_kwargs: failing_model) + mocker.patch("core.helper.moderation.create_plugin_model_assembly", return_value=assembly) with pytest.raises(InvokeBadRequestError, match="Rate limit exceeded, please try again later."): check_moderation( diff --git a/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py b/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py index c4fd970562..2b51dc8182 100644 --- a/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py +++ b/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py @@ -2,6 +2,7 @@ from unittest.mock import Mock import pytest +from core.plugin.impl.model_runtime_factory import create_model_type_instance from graphon.model_runtime.entities.common_entities import I18nObject from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.model_runtime.entities.provider_entities import ( @@ -73,7 +74,7 @@ def test_model_provider_factory_resolves_runtime_provider_name() -> None: supported_model_types=[ModelType.LLM], configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL], ) - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime([provider])) + factory = 
ModelProviderFactory(runtime=_FakeModelRuntime([provider])) provider_schema = factory.get_model_provider("openai") @@ -98,7 +99,7 @@ def test_model_provider_factory_resolves_canonical_short_name_independent_of_pro configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL], ), ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) provider_schema = factory.get_model_provider("openai") @@ -107,8 +108,8 @@ def test_model_provider_factory_resolves_canonical_short_name_independent_of_pro def test_model_provider_factory_requires_runtime() -> None: - with pytest.raises(ValueError, match="model_runtime is required"): - ModelProviderFactory(model_runtime=None) # type: ignore[arg-type] + with pytest.raises(ValueError, match="runtime is required"): + ModelProviderFactory(runtime=None) # type: ignore[arg-type] def test_model_provider_factory_get_providers_returns_runtime_providers() -> None: @@ -119,7 +120,7 @@ def test_model_provider_factory_get_providers_returns_runtime_providers() -> Non supported_model_types=[ModelType.LLM], ) ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) result = factory.get_providers() @@ -133,7 +134,7 @@ def test_model_provider_factory_get_provider_schema_delegates_to_provider_lookup provider_name="openai", supported_model_types=[ModelType.LLM], ) - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime([provider])) + factory = ModelProviderFactory(runtime=_FakeModelRuntime([provider])) result = factory.get_provider_schema("openai") @@ -142,7 +143,7 @@ def test_model_provider_factory_get_provider_schema_delegates_to_provider_lookup def test_model_provider_factory_raises_for_unknown_provider() -> None: factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( + runtime=_FakeModelRuntime( [ _build_provider( provider="langgenius/openai/openai", 
@@ -172,7 +173,7 @@ def test_model_provider_factory_get_models_filters_provider_and_model_type() -> models=[_build_model("rerank-v3", ModelType.RERANK)], ), ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) results = factory.get_models(provider="openai", model_type=ModelType.LLM) @@ -196,7 +197,7 @@ def test_model_provider_factory_get_models_skips_providers_without_requested_mod models=[_build_model("eleven_multilingual_v2", ModelType.TTS)], ), ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) results = factory.get_models(model_type=ModelType.TTS) @@ -214,7 +215,7 @@ def test_model_provider_factory_get_models_without_model_type_keeps_all_provider models=[_build_model("gpt-4o-mini", ModelType.LLM), _build_model("tts-1", ModelType.TTS)], ) ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) results = factory.get_models(provider="openai") @@ -242,7 +243,7 @@ def test_model_provider_factory_validates_provider_credentials() -> None: ) ] ) - factory = ModelProviderFactory(model_runtime=runtime) + factory = ModelProviderFactory(runtime=runtime) filtered = factory.provider_credentials_validate( provider="openai", @@ -258,7 +259,7 @@ def test_model_provider_factory_validates_provider_credentials() -> None: def test_model_provider_factory_provider_credentials_validate_requires_schema() -> None: factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( + runtime=_FakeModelRuntime( [ _build_provider( provider="langgenius/openai/openai", @@ -294,7 +295,7 @@ def test_model_provider_factory_validates_model_credentials() -> None: ) ] ) - factory = ModelProviderFactory(model_runtime=runtime) + factory = ModelProviderFactory(runtime=runtime) filtered = 
factory.model_credentials_validate( provider="openai", @@ -314,7 +315,7 @@ def test_model_provider_factory_validates_model_credentials() -> None: def test_model_provider_factory_model_credentials_validate_requires_schema() -> None: factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( + runtime=_FakeModelRuntime( [ _build_provider( provider="langgenius/openai/openai", @@ -346,7 +347,7 @@ def test_model_provider_factory_get_model_schema_and_icon_use_canonical_provider ) runtime.get_model_schema.return_value = "schema" runtime.get_provider_icon.return_value = (b"icon", "image/png") - factory = ModelProviderFactory(model_runtime=runtime) + factory = ModelProviderFactory(runtime=runtime) assert ( factory.get_model_schema( @@ -382,39 +383,43 @@ def test_model_provider_factory_get_model_schema_and_icon_use_canonical_provider (ModelType.TTS, TTSModel), ], ) -def test_model_provider_factory_builds_model_type_instances( +def test_create_model_type_instance_builds_model_wrappers( model_type: ModelType, expected_type: type[object], ) -> None: - factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( - [ - _build_provider( - provider="langgenius/openai/openai", - provider_name="openai", - supported_model_types=[model_type], - ) - ] - ) + runtime = _FakeModelRuntime( + [ + _build_provider( + provider="langgenius/openai/openai", + provider_name="openai", + supported_model_types=[model_type], + ) + ] ) - instance = factory.get_model_type_instance("openai", model_type) + instance = create_model_type_instance( + runtime=runtime, + provider_schema=runtime.fetch_model_providers()[0], + model_type=model_type, + ) assert isinstance(instance, expected_type) -def test_model_provider_factory_rejects_unsupported_model_type() -> None: - factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( - [ - _build_provider( - provider="langgenius/openai/openai", - provider_name="openai", - supported_model_types=[ModelType.LLM], - ) - ] - ) +def 
test_create_model_type_instance_rejects_unsupported_model_type() -> None: + runtime = _FakeModelRuntime( + [ + _build_provider( + provider="langgenius/openai/openai", + provider_name="openai", + supported_model_types=[ModelType.LLM], + ) + ] ) with pytest.raises(ValueError, match="Unsupported model type: unsupported"): - factory.get_model_type_instance("openai", "unsupported") # type: ignore[arg-type] + create_model_type_instance( + runtime=runtime, + provider_schema=runtime.fetch_model_providers()[0], + model_type="unsupported", # type: ignore[arg-type] + ) diff --git a/api/tests/unit_tests/core/ops/test_base_trace_instance.py b/api/tests/unit_tests/core/ops/test_base_trace_instance.py index ac65d13454..15a2af17ca 100644 --- a/api/tests/unit_tests/core/ops/test_base_trace_instance.py +++ b/api/tests/unit_tests/core/ops/test_base_trace_instance.py @@ -18,7 +18,7 @@ class ConcreteTraceInstance(BaseTraceInstance): @pytest.fixture -def mock_db_session(monkeypatch): +def mock_db_session(monkeypatch: pytest.MonkeyPatch): mock_session = MagicMock(spec=Session) mock_session.__enter__.return_value = mock_session mock_session.__exit__.return_value = None diff --git a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py index e47df0121e..33a3293682 100644 --- a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py +++ b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py @@ -203,7 +203,7 @@ class DummySessionContext: @pytest.fixture(autouse=True) -def patch_provider_map(monkeypatch): +def patch_provider_map(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({"dummy": FAKE_PROVIDER_ENTRY}) ) @@ -212,7 +212,7 @@ def patch_provider_map(monkeypatch): @pytest.fixture(autouse=True) -def patch_timer_and_current_app(monkeypatch): +def patch_timer_and_current_app(monkeypatch: pytest.MonkeyPatch): 
monkeypatch.setattr("core.ops.ops_trace_manager.threading.Timer", DummyTimer) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_queue", queue.Queue()) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_timer", None) @@ -227,12 +227,12 @@ def patch_timer_and_current_app(monkeypatch): @pytest.fixture(autouse=True) -def patch_sqlalchemy_session(monkeypatch): +def patch_sqlalchemy_session(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.ops.ops_trace_manager.Session", DummySessionContext) @pytest.fixture -def encryption_mocks(monkeypatch): +def encryption_mocks(monkeypatch: pytest.MonkeyPatch): encrypt_mock = MagicMock(side_effect=lambda tenant, value: f"enc-{value}") batch_decrypt_mock = MagicMock(side_effect=lambda tenant, values: [f"dec-{value}" for value in values]) obfuscate_mock = MagicMock(side_effect=lambda value: f"ob-{value}") @@ -243,7 +243,7 @@ def encryption_mocks(monkeypatch): @pytest.fixture -def mock_db(monkeypatch): +def mock_db(monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.scalars.return_value.all.return_value = ["chat"] db_mock = MagicMock() @@ -254,7 +254,7 @@ def mock_db(monkeypatch): @pytest.fixture -def workflow_repo_fixture(monkeypatch): +def workflow_repo_fixture(monkeypatch: pytest.MonkeyPatch): repo = MagicMock() repo.get_workflow_run_by_id_without_tenant.return_value = make_workflow_run() monkeypatch.setattr(TraceTask, "_get_workflow_run_repo", classmethod(lambda cls: repo)) @@ -340,13 +340,13 @@ def test_get_ops_trace_instance_handles_none_app(mock_db): assert OpsTraceManager.get_ops_trace_instance("app-id") is None -def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch): +def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": False})) mock_db.get.return_value = app assert OpsTraceManager.get_ops_trace_instance("app-id") is None -def 
test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch): +def test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": True, "tracing_provider": "missing"})) mock_db.get.return_value = app monkeypatch.setattr("core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({})) @@ -388,7 +388,7 @@ def test_get_app_config_through_message_id_app_model_config(mock_db): assert result.id == "cfg" -def test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch): +def test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): mock_db.get.return_value = None with pytest.raises(ValueError, match="Invalid tracing provider"): OpsTraceManager.update_app_tracing_config("app", True, "bad") @@ -407,21 +407,21 @@ def test_update_app_tracing_config_success(mock_db): def test_get_app_tracing_config_errors_when_missing(mock_db): mock_db.get.return_value = None with pytest.raises(ValueError, match="App not found"): - OpsTraceManager.get_app_tracing_config("app") + OpsTraceManager.get_app_tracing_config("app", mock_db) def test_get_app_tracing_config_returns_defaults(mock_db): mock_db.get.return_value = SimpleNamespace(tracing=None) - assert OpsTraceManager.get_app_tracing_config("app-id") == {"enabled": False, "tracing_provider": None} + assert OpsTraceManager.get_app_tracing_config("app-id", mock_db) == {"enabled": False, "tracing_provider": None} def test_get_app_tracing_config_returns_payload(mock_db): payload = {"enabled": True, "tracing_provider": "dummy"} mock_db.get.return_value = SimpleNamespace(tracing=json.dumps(payload)) - assert OpsTraceManager.get_app_tracing_config("app-id") == payload + assert OpsTraceManager.get_app_tracing_config("app-id", mock_db) == payload -def test_check_and_project_helpers(monkeypatch): +def test_check_and_project_helpers(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( 
"core.ops.ops_trace_manager.provider_config_map", FakeProviderMap( @@ -449,7 +449,7 @@ def test_check_and_project_helpers(monkeypatch): assert OpsTraceManager.get_trace_config_project_url({}, "dummy") == "url" -def test_trace_task_conversation_and_extract(monkeypatch): +def test_trace_task_conversation_and_extract(monkeypatch: pytest.MonkeyPatch): task = TraceTask(trace_type=TraceTaskName.CONVERSATION_TRACE, message_id="msg") assert task.conversation_trace(foo="bar") == {"foo": "bar"} assert task._extract_streaming_metrics(make_message_data(message_metadata="not json")) == {} @@ -525,7 +525,7 @@ def test_extract_streaming_metrics_invalid_json(): assert task._extract_streaming_metrics(fake_message) == {} -def test_trace_queue_manager_add_and_collect(monkeypatch): +def test_trace_queue_manager_add_and_collect(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -536,7 +536,7 @@ def test_trace_queue_manager_add_and_collect(monkeypatch): assert tasks == [task] -def test_trace_queue_manager_run_invokes_send(monkeypatch): +def test_trace_queue_manager_run_invokes_send(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -556,7 +556,7 @@ def test_trace_queue_manager_run_invokes_send(monkeypatch): assert called["tasks"] == [task] -def test_trace_queue_manager_send_to_celery(monkeypatch): +def test_trace_queue_manager_send_to_celery(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) diff --git a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py index a4903054e0..13cf01651e 100644 --- a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py +++ 
b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py @@ -19,7 +19,7 @@ import pytest @pytest.fixture -def trace_queue_manager_and_task(monkeypatch): +def trace_queue_manager_and_task(monkeypatch: pytest.MonkeyPatch): """Fixture to provide TraceQueueManager and TraceTask with delayed imports.""" module_name = "core.ops.ops_trace_manager" if module_name not in sys.modules: diff --git a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py index 1537ffacf5..d8843f0eeb 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.request import PluginInvokeContext from core.plugin.impl.agent import PluginAgentClient @@ -15,7 +17,7 @@ def _agent_provider(name: str = "agent") -> SimpleNamespace: class TestPluginAgentClient: - def test_fetch_agent_strategy_providers(self, mocker): + def test_fetch_agent_strategy_providers(self, mocker: MockerFixture): client = PluginAgentClient() provider = _agent_provider("remote") @@ -43,7 +45,7 @@ class TestPluginAgentClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.strategies[0].identity.provider == "org/plugin/remote" - def test_fetch_agent_strategy_provider(self, mocker): + def test_fetch_agent_strategy_provider(self, mocker: MockerFixture): client = PluginAgentClient() provider = _agent_provider("provider") @@ -63,7 +65,7 @@ class TestPluginAgentClient: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.strategies[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks_and_passes_context(self, mocker): + def test_invoke_merges_chunks_and_passes_context(self, mocker: MockerFixture): client = PluginAgentClient() stream_mock = mocker.patch.object( client, 
"_request_with_plugin_daemon_response_stream", return_value=iter(["raw"]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py index 5f564062d5..c2cce5d691 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py @@ -1,12 +1,13 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.plugin.impl.asset import PluginAssetManager class TestPluginAssetManager: - def test_fetch_asset_success(self, mocker): + def test_fetch_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"asset-bytes") request_mock = mocker.patch.object(manager, "_request", return_value=response) @@ -16,14 +17,14 @@ class TestPluginAssetManager: assert result == b"asset-bytes" request_mock.assert_called_once_with(method="GET", path="plugin/tenant-1/asset/asset-1") - def test_fetch_asset_not_found_raises(self, mocker): + def test_fetch_asset_not_found_raises(self, mocker: MockerFixture): manager = PluginAssetManager() mocker.patch.object(manager, "_request", return_value=MagicMock(status_code=404, content=b"")) with pytest.raises(ValueError, match="can not found asset asset-1"): manager.fetch_asset("tenant-1", "asset-1") - def test_extract_asset_success(self, mocker): + def test_extract_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"file-content") request_mock = mocker.patch.object(manager, "_request", return_value=response) @@ -37,7 +38,7 @@ class TestPluginAssetManager: params={"plugin_unique_identifier": "org/plugin:1", "file_path": "README.md"}, ) - def test_extract_asset_not_found_raises(self, mocker): + def test_extract_asset_not_found_raises(self, mocker: MockerFixture): manager = PluginAssetManager() mocker.patch.object(manager, "_request", 
return_value=MagicMock(status_code=404, content=b"")) diff --git a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py index 23894bd417..b154f056ca 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import MockerFixture from core.plugin.endpoint.exc import EndpointSetupFailedError from core.plugin.entities.plugin_daemon import PluginDaemonInnerError @@ -39,7 +40,7 @@ class _StreamContext: class TestBasePluginClientImpl: - def test_inject_trace_headers(self, mocker): + def test_inject_trace_headers(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch("core.plugin.impl.base.dify_config.ENABLE_OTEL", True) trace_header = "00-abc-xyz-01" @@ -54,7 +55,7 @@ class TestBasePluginClientImpl: client._inject_trace_headers(headers_with_existing) assert headers_with_existing["TraceParent"] == "exists" - def test_stream_request_handles_data_lines_and_dict_payload(self, mocker): + def test_stream_request_handles_data_lines_and_dict_payload(self, mocker: MockerFixture): client = BasePluginClient() stream_mock = mocker.patch( "httpx.Client.stream", @@ -66,14 +67,14 @@ class TestBasePluginClientImpl: assert result == ["hello", "world"] assert stream_mock.call_args.kwargs["data"] == {"k": "v"} - def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker): + def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_request", side_effect=RuntimeError("boom")) with pytest.raises(ValueError, match="Failed to request plugin daemon"): client._request_with_plugin_daemon_response("GET", "plugin/tenant/path", bool) - def test_request_with_plugin_daemon_response_applies_transformer(self, mocker): + def 
test_request_with_plugin_daemon_response_applies_transformer(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_request", return_value=_ResponseStub({"code": 0, "message": "", "data": True})) @@ -88,14 +89,14 @@ class TestBasePluginClientImpl: assert result is True assert transformed == {"code": 0, "message": "", "data": True} - def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"error":"bad-line"}'])) with pytest.raises(ValueError, match="bad-line"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object( client, "_stream_request", return_value=iter(['{"code":-500,"message":"not-json","data":null}']) @@ -105,14 +106,14 @@ class TestBasePluginClientImpl: list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) assert exc_info.value.message == "not-json" - def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"code":-1,"message":"err","data":null}'])) with pytest.raises(ValueError, match="plugin daemon: err, code: -1"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker: MockerFixture): 
client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"code":0,"message":"","data":null}'])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py index 4c5987d759..94723dcfe2 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.datasource.entities.datasource_entities import ( GetOnlineDocumentPageContentRequest, OnlineDriveBrowseFilesRequest, @@ -19,7 +21,7 @@ def _datasource_provider(name: str = "provider") -> SimpleNamespace: class TestPluginDatasourceManager: - def test_fetch_datasource_providers(self, mocker): + def test_fetch_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ -52,7 +54,7 @@ class TestPluginDatasourceManager: assert result[1].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_installed_datasource_providers(self, mocker): + def test_fetch_installed_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ -83,7 +85,7 @@ class TestPluginDatasourceManager: assert result[0].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_datasource_provider_local_and_remote(self, mocker): + def test_fetch_datasource_provider_local_and_remote(self, mocker: MockerFixture): manager = 
PluginDatasourceManager() local = manager.fetch_datasource_provider("tenant-1", "langgenius/file/file") @@ -113,7 +115,7 @@ class TestPluginDatasourceManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.datasources[0].identity.provider == "org/plugin/provider" - def test_get_website_crawl_streaming(self, mocker): + def test_get_website_crawl_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["crawl"]) @@ -132,7 +134,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_get_online_document_pages_streaming(self, mocker): + def test_get_online_document_pages_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["pages"]) @@ -151,7 +153,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_get_online_document_page_content_streaming(self, mocker): + def test_get_online_document_page_content_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["content"]) @@ -170,7 +172,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_online_drive_browse_files_streaming(self, mocker): + def test_online_drive_browse_files_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["browse"]) @@ -189,7 +191,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_online_drive_download_file_streaming(self, mocker): + def 
test_online_drive_download_file_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["download"]) @@ -208,14 +210,14 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker): + def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([SimpleNamespace(result=True)]) assert manager.validate_provider_credentials("tenant-1", "user-1", "provider", "org/plugin", {"k": "v"}) is True - def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker): + def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py index c80785aee0..05959207b1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py @@ -1,10 +1,12 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.impl.debugging import PluginDebuggingClient class TestPluginDebuggingClient: - def test_get_debugging_key(self, mocker): + def test_get_debugging_key(self, mocker: MockerFixture): client = PluginDebuggingClient() request_mock = mocker.patch.object( client, diff --git a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py 
b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py index 4cf657a050..7a24cc01d1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py @@ -1,11 +1,12 @@ import pytest +from pytest_mock import MockerFixture from core.plugin.impl.endpoint import PluginEndpointClient from core.plugin.impl.exc import PluginDaemonInternalServerError class TestPluginEndpointClientImpl: - def test_create_endpoint(self, mocker): + def test_create_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -18,7 +19,7 @@ class TestPluginEndpointClientImpl: assert args[:3] == ("POST", "plugin/tenant-1/endpoint/setup", bool) assert kwargs["data"]["plugin_unique_identifier"] == "org/plugin:1" - def test_list_endpoints(self, mocker): + def test_list_endpoints(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -28,7 +29,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list" assert request_mock.call_args.kwargs["params"] == {"page": 2, "page_size": 20} - def test_list_endpoints_for_single_plugin(self, mocker): + def test_list_endpoints_for_single_plugin(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -38,7 +39,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list/plugin" assert request_mock.call_args.kwargs["params"] == {"plugin_id": "org/plugin", "page": 1, "page_size": 10} - def test_update_endpoint(self, mocker): + def test_update_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() 
request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -47,7 +48,7 @@ class TestPluginEndpointClientImpl: assert result is True assert request_mock.call_args.args[:3] == ("POST", "plugin/tenant-1/endpoint/update", bool) - def test_enable_and_disable_endpoint(self, mocker): + def test_enable_and_disable_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -58,7 +59,7 @@ class TestPluginEndpointClientImpl: assert calls[0].args[1] == "plugin/tenant-1/endpoint/enable" assert calls[1].args[1] == "plugin/tenant-1/endpoint/disable" - def test_delete_endpoint_idempotent_and_re_raise(self, mocker): + def test_delete_endpoint_idempotent_and_re_raise(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response") diff --git a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py index 8c6f1c6b7f..d99a8c114f 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py @@ -1,11 +1,13 @@ import json +from pytest_mock import MockerFixture + from core.plugin.impl import exc as exc_module from core.plugin.impl.exc import PluginDaemonError, PluginInvokeError class TestPluginImplExceptions: - def test_plugin_daemon_error_str_contains_request_id(self, mocker): + def test_plugin_daemon_error_str_contains_request_id(self, mocker: MockerFixture): mocker.patch("core.plugin.impl.exc.get_request_id", return_value="req-123") error = PluginDaemonError("bad") @@ -21,7 +23,7 @@ class TestPluginImplExceptions: assert "RateLimit" in friendly assert "too many" in friendly - def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker): + def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker: 
MockerFixture): err = PluginInvokeError("plain text") assert err._get_error_object() == {} @@ -32,7 +34,7 @@ class TestPluginImplExceptions: err2 = PluginInvokeError("plain text") assert err2.get_error_message() == "plain text" - def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker): + def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker: MockerFixture): adapter = mocker.patch.object(exc_module, "TypeAdapter") adapter.return_value.validate_json.side_effect = RuntimeError("invalid") diff --git a/api/tests/unit_tests/core/plugin/impl/test_model_client.py b/api/tests/unit_tests/core/plugin/impl/test_model_client.py index bcbebbb38b..6dc572310c 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_model_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_model_client.py @@ -4,13 +4,14 @@ import io from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.plugin.entities.plugin_daemon import PluginDaemonInnerError from core.plugin.impl.model import PluginModelClient class TestPluginModelClient: - def test_fetch_model_providers(self, mocker): + def test_fetch_model_providers(self, mocker: MockerFixture): client = PluginModelClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["provider-a"]) @@ -23,7 +24,7 @@ class TestPluginModelClient: ) assert request_mock.call_args.kwargs["params"] == {"page": 1, "page_size": 256} - def test_get_model_schema(self, mocker): + def test_get_model_schema(self, mocker: MockerFixture): client = PluginModelClient() schema = SimpleNamespace(name="schema") stream_mock = mocker.patch.object( @@ -45,7 +46,7 @@ class TestPluginModelClient: assert result is schema assert stream_mock.call_args.args[:2] == ("POST", "plugin/tenant-1/dispatch/model/schema") - def test_get_model_schema_empty_stream_returns_none(self, mocker): + def test_get_model_schema_empty_stream_returns_none(self, 
mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -53,7 +54,7 @@ class TestPluginModelClient: assert result is None - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -77,7 +78,7 @@ class TestPluginModelClient: "plugin/tenant-1/dispatch/model/validate_provider_credentials", ) - def test_validate_provider_credentials_without_dict_update(self, mocker): + def test_validate_provider_credentials_without_dict_update(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -91,13 +92,13 @@ class TestPluginModelClient: assert result is False assert credentials == {"api_key": "same"} - def test_validate_provider_credentials_empty_returns_false(self, mocker): + def test_validate_provider_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.validate_provider_credentials("tenant-1", "user-1", "org/plugin:1", "provider-a", {}) is False - def test_validate_model_credentials(self, mocker): + def test_validate_model_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -123,7 +124,7 @@ class TestPluginModelClient: "plugin/tenant-1/dispatch/model/validate_model_credentials", ) - def test_validate_model_credentials_empty_returns_false(self, mocker): + def test_validate_model_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -132,7 +133,7 @@ class TestPluginModelClient: is False ) - def test_invoke_llm(self, mocker): + def test_invoke_llm(self, mocker: 
MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk-1"]) @@ -160,7 +161,7 @@ class TestPluginModelClient: assert call_kwargs["data"]["data"]["stream"] is False assert call_kwargs["data"]["data"]["model_parameters"] == {"temperature": 0.1} - def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker: MockerFixture): client = PluginModelClient() def _boom(): @@ -182,7 +183,7 @@ class TestPluginModelClient: ) ) - def test_get_llm_num_tokens(self, mocker): + def test_get_llm_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -204,7 +205,7 @@ class TestPluginModelClient: assert result == 42 - def test_get_llm_num_tokens_empty_returns_zero(self, mocker): + def test_get_llm_num_tokens_empty_returns_zero(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -213,7 +214,7 @@ class TestPluginModelClient: == 0 ) - def test_invoke_text_embedding(self, mocker): + def test_invoke_text_embedding(self, mocker: MockerFixture): client = PluginModelClient() embedding_result = SimpleNamespace(data=[[0.1, 0.2]]) mocker.patch.object( @@ -233,7 +234,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_text_embedding_empty_raises(self, mocker): + def test_invoke_text_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -242,7 +243,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "embedding-a", {}, ["hello"], "x" ) - def test_invoke_multimodal_embedding(self, mocker): + def test_invoke_multimodal_embedding(self, mocker: MockerFixture): client = 
PluginModelClient() embedding_result = SimpleNamespace(data=[[0.3, 0.4]]) mocker.patch.object( @@ -262,7 +263,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_multimodal_embedding_empty_raises(self, mocker): + def test_invoke_multimodal_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -271,7 +272,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "embedding-a", {}, [{"type": "image"}], "x" ) - def test_get_text_embedding_num_tokens(self, mocker): + def test_get_text_embedding_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -287,7 +288,7 @@ class TestPluginModelClient: 3, ] - def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker): + def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -298,7 +299,7 @@ class TestPluginModelClient: == [] ) - def test_invoke_rerank(self, mocker): + def test_invoke_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.9]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -318,14 +319,14 @@ class TestPluginModelClient: assert result is rerank_result - def test_invoke_rerank_empty_raises(self, mocker): + def test_invoke_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) with pytest.raises(ValueError, match="Failed to invoke rerank"): client.invoke_rerank("tenant-1", "user-1", "org/plugin:1", "provider-a", "rerank-a", {}, "q", ["doc-1"]) - def 
test_invoke_multimodal_rerank(self, mocker): + def test_invoke_multimodal_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.8]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -345,7 +346,7 @@ class TestPluginModelClient: assert result is rerank_result - def test_invoke_multimodal_rerank_empty_raises(self, mocker): + def test_invoke_multimodal_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -361,7 +362,7 @@ class TestPluginModelClient: [{"type": "image"}], ) - def test_invoke_tts(self, mocker): + def test_invoke_tts(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -384,7 +385,7 @@ class TestPluginModelClient: assert result == [b"hello", b"!"] - def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker: MockerFixture): client = PluginModelClient() def _boom(): @@ -396,7 +397,7 @@ class TestPluginModelClient: with pytest.raises(ValueError, match="tts error-400"): list(client.invoke_tts("tenant-1", "user-1", "org/plugin:1", "provider-a", "tts-a", {}, "hello", "alloy")) - def test_get_tts_model_voices(self, mocker): + def test_get_tts_model_voices(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -425,13 +426,13 @@ class TestPluginModelClient: assert result == [{"name": "Alloy", "value": "alloy"}, {"name": "Echo", "value": "echo"}] - def test_get_tts_model_voices_empty_returns_list(self, mocker): + def test_get_tts_model_voices_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.get_tts_model_voices("tenant-1", "user-1", 
"org/plugin:1", "provider-a", "tts-a", {}) == [] - def test_invoke_speech_to_text(self, mocker): + def test_invoke_speech_to_text(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -452,7 +453,7 @@ class TestPluginModelClient: assert result == "transcribed text" assert stream_mock.call_args.kwargs["data"]["data"]["file"] == "616263" - def test_invoke_speech_to_text_empty_raises(self, mocker): + def test_invoke_speech_to_text_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -461,7 +462,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "stt-a", {}, io.BytesIO(b"abc") ) - def test_invoke_moderation(self, mocker): + def test_invoke_moderation(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -482,7 +483,7 @@ class TestPluginModelClient: assert result is True assert stream_mock.call_args.kwargs["path"] == "plugin/tenant-1/dispatch/moderation/invoke" - def test_invoke_moderation_empty_raises(self, mocker): + def test_invoke_moderation_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py b/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py index 7491e79f30..52da674f06 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py +++ b/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py @@ -31,6 +31,6 @@ def test_plugin_model_assembly_reuses_single_runtime_across_views(): assert assembly.model_manager is model_manager mock_runtime_factory.assert_called_once_with(tenant_id="tenant-1", user_id="user-1") - 
mock_provider_factory_cls.assert_called_once_with(model_runtime=runtime) + mock_provider_factory_cls.assert_called_once_with(runtime=runtime) mock_provider_manager_cls.assert_called_once_with(model_runtime=runtime) mock_model_manager_cls.assert_called_once_with(provider_manager=provider_manager) diff --git a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py index 6fb4c99432..f6c9b1c669 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py +++ b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.impl.oauth import OAuthHandler @@ -25,7 +26,7 @@ def _build_request(body: bytes = b"payload") -> Request: class TestOAuthHandler: - def test_get_authorization_url(self, mocker): + def test_get_authorization_url(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -45,7 +46,7 @@ class TestOAuthHandler: assert response.authorization_url == "https://auth.example.com" assert stream_mock.call_count == 1 - def test_get_authorization_url_no_response_raises(self, mocker): + def test_get_authorization_url_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -59,7 +60,7 @@ class TestOAuthHandler: system_credentials={}, ) - def test_get_credentials(self, mocker): + def test_get_credentials(self, mocker: MockerFixture): handler = OAuthHandler() captured_data = {} @@ -85,7 +86,7 @@ class TestOAuthHandler: assert "raw_http_request" in captured_data["data"] assert stream_mock.call_count == 1 - def test_get_credentials_no_response_raises(self, mocker): + def test_get_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() 
mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -100,7 +101,7 @@ class TestOAuthHandler: request=_build_request(), ) - def test_refresh_credentials(self, mocker): + def test_refresh_credentials(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -121,7 +122,7 @@ class TestOAuthHandler: assert response.credentials == {"token": "new"} assert stream_mock.call_count == 1 - def test_refresh_credentials_no_response_raises(self, mocker): + def test_refresh_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py index 80cf46f9bb..3ae3cc18e4 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.plugin_daemon import CredentialType from core.plugin.impl.tool import PluginToolManager @@ -15,7 +17,7 @@ def _tool_provider(name: str = "provider") -> SimpleNamespace: class TestPluginToolManager: - def test_fetch_tool_providers(self, mocker): + def test_fetch_tool_providers(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("remote") mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", return_value={"resolved": True}) @@ -44,7 +46,7 @@ class TestPluginToolManager: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.tools[0].identity.provider == "org/plugin/remote" - def test_fetch_tool_provider(self, mocker): + def test_fetch_tool_provider(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("provider") 
mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", return_value={"resolved": True}) @@ -68,7 +70,7 @@ class TestPluginToolManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.tools[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks(self, mocker): + def test_invoke_merges_chunks(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object( manager, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk"]) @@ -92,7 +94,7 @@ class TestPluginToolManager: assert merge_mock.call_count == 1 assert stream_mock.call_args.kwargs["headers"]["X-Plugin-ID"] == "org/plugin" - def test_validate_credentials_paths(self, mocker): + def test_validate_credentials_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") @@ -108,7 +110,7 @@ class TestPluginToolManager: stream_mock.return_value = iter([]) assert manager.validate_datasource_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) is False - def test_get_runtime_parameters_paths(self, mocker): + def test_get_runtime_parameters_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") diff --git a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py index 76da51c2c8..811bb7e50d 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.entities.plugin_daemon import CredentialType @@ -62,7 +63,7 @@ def _subscription_call_kwargs(method_name: str) -> dict: class 
TestPluginTriggerClient: - def test_fetch_trigger_providers(self, mocker): + def test_fetch_trigger_providers(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("remote") @@ -89,7 +90,7 @@ class TestPluginTriggerClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.events[0].identity.provider == "org/plugin/remote" - def test_fetch_trigger_provider(self, mocker): + def test_fetch_trigger_provider(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("provider") @@ -108,7 +109,7 @@ class TestPluginTriggerClient: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.events[0].identity.provider == "org/plugin/provider" - def test_invoke_trigger_event(self, mocker): + def test_invoke_trigger_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -132,7 +133,7 @@ class TestPluginTriggerClient: assert result.variables == {"ok": True} assert stream_mock.call_count == 1 - def test_invoke_trigger_event_no_response_raises(self, mocker): + def test_invoke_trigger_event_no_response_raises(self, mocker: MockerFixture): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -150,7 +151,7 @@ class TestPluginTriggerClient: payload={"payload": 1}, ) - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response_stream") @@ -163,7 +164,7 @@ class TestPluginTriggerClient: ): client.validate_provider_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) - def test_dispatch_event(self, mocker): + def test_dispatch_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = 
mocker.patch.object( client, @@ -195,7 +196,7 @@ class TestPluginTriggerClient: ) @pytest.mark.parametrize("method_name", ["subscribe", "unsubscribe", "refresh"]) - def test_subscription_operations_success(self, mocker, method_name): + def test_subscription_operations_success(self, mocker: MockerFixture, method_name): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -217,7 +218,7 @@ class TestPluginTriggerClient: ("refresh", "No response received from plugin daemon for refresh"), ], ) - def test_subscription_operations_no_response(self, mocker, method_name, expected): + def test_subscription_operations_no_response(self, mocker: MockerFixture, method_name, expected): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) method = getattr(client, method_name) diff --git a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py index 3feb4159ad..2ed7c70ed9 100644 --- a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py +++ b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import BaseModel +from pytest_mock import MockerFixture from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig from core.plugin.backwards_invocation.app import PluginAppBackwardsInvocation @@ -41,7 +42,7 @@ class TestBaseBackwardsInvocation: class TestPluginAppBackwardsInvocation: - def test_fetch_app_info_workflow_path(self, mocker): + def test_fetch_app_info_workflow_path(self, mocker: MockerFixture): workflow = MagicMock() workflow.features_dict = {"feature": "v"} workflow.user_input_form.return_value = [{"name": "foo"}] @@ -57,7 +58,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"data": {"mapped": True}} mapper.assert_called_once_with(features_dict={"feature": "v"}, 
user_input_form=[{"name": "foo"}]) - def test_fetch_app_info_model_config_path(self, mocker): + def test_fetch_app_info_model_config_path(self, mocker: MockerFixture): model_config = MagicMock() model_config.to_dict.return_value = {"user_input_form": [{"name": "bar"}], "k": "v"} app = MagicMock(mode=AppMode.COMPLETION, app_model_config=model_config) @@ -81,7 +82,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.COMPLETION, "invoke_completion_app"), ], ) - def test_invoke_app_routes_by_mode(self, mocker, mode, route_method): + def test_invoke_app_routes_by_mode(self, mocker: MockerFixture, mode, route_method): app = MagicMock(mode=mode) user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -102,7 +103,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"routed": True} assert route.call_count == 1 - def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker): + def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker: MockerFixture): app = MagicMock(mode=AppMode.WORKFLOW) end_user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -127,7 +128,7 @@ class TestPluginAppBackwardsInvocation: get_or_create.assert_called_once_with(app) assert route.call_args.args[1] is end_user - def test_invoke_app_missing_query_for_chat_raises(self, mocker): + def test_invoke_app_missing_query_for_chat_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=MagicMock(mode=AppMode.CHAT)) mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -143,7 +144,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_app_unexpected_mode_raises(self, mocker): + def test_invoke_app_unexpected_mode_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=MagicMock(mode="other")) 
mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -166,7 +167,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.CHAT, "core.plugin.backwards_invocation.app.ChatAppGenerator.generate"), ], ) - def test_invoke_chat_app_agent_and_chat(self, mocker, mode, generator_path): + def test_invoke_chat_app_agent_and_chat(self, mocker: MockerFixture, mode, generator_path): app = MagicMock(mode=mode, workflow=None) spy = mocker.patch(generator_path, return_value={"result": "ok"}) @@ -183,7 +184,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"result": "ok"} assert spy.call_count == 1 - def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker): + def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -242,7 +243,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_workflow_app_injects_pause_state_config(self, mocker): + def test_invoke_workflow_app_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -284,7 +285,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_completion_app(self, mocker): + def test_invoke_completion_app(self, mocker: MockerFixture): spy = mocker.patch( "core.plugin.backwards_invocation.app.CompletionAppGenerator.generate", return_value={"ok": 1} ) @@ -295,7 +296,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"ok": 1} assert spy.call_count == 1 - def test_get_user_returns_end_user(self, mocker): + def test_get_user_returns_end_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [MagicMock(id="end-user")] session_ctx = MagicMock() @@ -307,7 +308,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "end-user" - def test_get_user_falls_back_to_account_user(self, 
mocker): + def test_get_user_falls_back_to_account_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, MagicMock(id="account-user")] session_ctx = MagicMock() @@ -319,7 +320,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "account-user" - def test_get_user_raises_when_user_not_found(self, mocker): + def test_get_user_raises_when_user_not_found(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, None] session_ctx = MagicMock() @@ -331,21 +332,21 @@ class TestPluginAppBackwardsInvocation: with pytest.raises(ValueError, match="user not found"): PluginAppBackwardsInvocation._get_user("uid") - def test_get_app_returns_app(self, mocker): + def test_get_app_returns_app(self, mocker: MockerFixture): app_obj = MagicMock(id="app") db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=app_obj))) mocker.patch("core.plugin.backwards_invocation.app.db", db) assert PluginAppBackwardsInvocation._get_app("app", "tenant") is app_obj - def test_get_app_raises_when_missing(self, mocker): + def test_get_app_raises_when_missing(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=None))) mocker.patch("core.plugin.backwards_invocation.app.db", db) with pytest.raises(ValueError, match="app not found"): PluginAppBackwardsInvocation._get_app("app", "tenant") - def test_get_app_raises_when_query_fails(self, mocker): + def test_get_app_raises_when_query_fails(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(side_effect=RuntimeError("db down")))) mocker.patch("core.plugin.backwards_invocation.app.db", db) diff --git a/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py b/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py index 88bf555594..b1ecaa4ead 100644 --- a/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py +++ 
b/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py @@ -3,7 +3,7 @@ import datetime import uuid from types import SimpleNamespace -from unittest.mock import Mock, sentinel +from unittest.mock import Mock, patch, sentinel import pytest @@ -13,6 +13,8 @@ from core.plugin.impl.model import PluginModelClient from core.plugin.impl.model_runtime import TENANT_SCOPE_SCHEMA_CACHE_USER_ID, PluginModelRuntime from core.plugin.impl.model_runtime_factory import create_plugin_model_runtime from graphon.model_runtime.entities.common_entities import I18nObject +from graphon.model_runtime.entities.llm_entities import LLMResultChunk, LLMResultChunkDelta, LLMUsage +from graphon.model_runtime.entities.message_entities import AssistantPromptMessage from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity @@ -146,7 +148,31 @@ class TestPluginModelRuntime: def test_invoke_llm_resolves_plugin_fields(self) -> None: client = Mock(spec=PluginModelClient) - client.invoke_llm.return_value = sentinel.result + usage = LLMUsage.empty_usage() + client.invoke_llm.return_value = iter( + [ + LLMResultChunk( + model="gpt-4o-mini", + prompt_messages=[], + system_fingerprint="fp-plugin", + delta=LLMResultChunkDelta( + index=0, + message=AssistantPromptMessage(content="plugin "), + ), + ), + LLMResultChunk( + model="gpt-4o-mini", + prompt_messages=[], + system_fingerprint="fp-plugin", + delta=LLMResultChunkDelta( + index=1, + message=AssistantPromptMessage(content="response"), + usage=usage, + finish_reason="stop", + ), + ), + ] + ) runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) result = runtime.invoke_llm( @@ -160,7 +186,11 @@ class TestPluginModelRuntime: stream=False, ) - assert result is sentinel.result + assert result.model == "gpt-4o-mini" + assert result.prompt_messages == [] + assert result.message.content == "plugin 
response" + assert result.usage == usage + assert result.system_fingerprint == "fp-plugin" client.invoke_llm.assert_called_once_with( tenant_id="tenant", user_id="user", @@ -175,6 +205,38 @@ class TestPluginModelRuntime: stream=False, ) + def test_invoke_llm_returns_plugin_stream_directly(self) -> None: + client = Mock(spec=PluginModelClient) + stream_result = iter([]) + client.invoke_llm.return_value = stream_result + runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) + + result = runtime.invoke_llm( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={"temperature": 0.3}, + prompt_messages=[], + tools=None, + stop=("END",), + stream=True, + ) + + assert result is stream_result + client.invoke_llm.assert_called_once_with( + tenant_id="tenant", + user_id="user", + plugin_id="langgenius/openai", + provider="openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={"temperature": 0.3}, + prompt_messages=[], + tools=None, + stop=["END"], + stream=True, + ) + def test_invoke_llm_rejects_per_call_user_override(self) -> None: client = Mock(spec=PluginModelClient) client.invoke_llm.return_value = sentinel.result @@ -267,6 +329,129 @@ def test_get_model_schema_uses_cached_schema_without_hitting_client(monkeypatch: client.get_model_schema.assert_not_called() +def test_structured_output_adapter_invokes_bound_runtime_streaming() -> None: + runtime = Mock() + runtime.invoke_llm.return_value = sentinel.stream_result + adapter = model_runtime_module._PluginStructuredOutputModelInstance( + runtime=runtime, + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + ) + tool = Mock() + + result = adapter.invoke_llm( + prompt_messages=[], + model_parameters=None, + tools=[tool], + stop=["END"], + stream=True, + callbacks=sentinel.callbacks, + ) + + assert result is sentinel.stream_result + 
runtime.invoke_llm.assert_called_once_with( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={}, + prompt_messages=[], + tools=[tool], + stop=["END"], + stream=True, + ) + + +def test_structured_output_adapter_invokes_bound_runtime_non_streaming() -> None: + runtime = Mock() + runtime.invoke_llm.return_value = sentinel.result + adapter = model_runtime_module._PluginStructuredOutputModelInstance( + runtime=runtime, + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + ) + + result = adapter.invoke_llm( + prompt_messages=[], + model_parameters={"temperature": 0}, + tools=None, + stop=None, + stream=False, + ) + + assert result is sentinel.result + runtime.invoke_llm.assert_called_once_with( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={"temperature": 0}, + prompt_messages=[], + tools=None, + stop=None, + stream=False, + ) + + +def test_invoke_llm_with_structured_output_delegates_with_bound_adapter() -> None: + client = Mock(spec=PluginModelClient) + runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) + schema = _build_model_schema() + runtime.get_model_schema = Mock(return_value=schema) # type: ignore[method-assign] + + with patch.object( + model_runtime_module, + "invoke_llm_with_structured_output_helper", + return_value=sentinel.structured_result, + ) as mock_helper: + result = runtime.invoke_llm_with_structured_output( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + json_schema={"type": "object"}, + model_parameters={"temperature": 0}, + prompt_messages=[], + stop=("END",), + stream=False, + ) + + assert result is sentinel.structured_result + runtime.get_model_schema.assert_called_once_with( + provider="langgenius/openai/openai", + model_type=ModelType.LLM, + model="gpt-4o-mini", + 
credentials={"api_key": "secret"}, + ) + helper_kwargs = mock_helper.call_args.kwargs + assert helper_kwargs["provider"] == "langgenius/openai/openai" + assert helper_kwargs["model_schema"] == schema + assert helper_kwargs["json_schema"] == {"type": "object"} + assert helper_kwargs["model_parameters"] == {"temperature": 0} + assert helper_kwargs["prompt_messages"] == [] + assert helper_kwargs["tools"] is None + assert helper_kwargs["stop"] == ["END"] + assert helper_kwargs["stream"] is False + assert isinstance(helper_kwargs["model_instance"], model_runtime_module._PluginStructuredOutputModelInstance) + + +def test_invoke_llm_with_structured_output_raises_when_model_schema_is_missing() -> None: + client = Mock(spec=PluginModelClient) + runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) + runtime.get_model_schema = Mock(return_value=None) # type: ignore[method-assign] + + with pytest.raises(ValueError, match="Model schema not found for gpt-4o-mini"): + runtime.invoke_llm_with_structured_output( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + json_schema={"type": "object"}, + model_parameters={}, + prompt_messages=[], + stop=None, + stream=False, + ) + + def test_get_model_schema_deletes_invalid_cache_and_refetches(monkeypatch: pytest.MonkeyPatch) -> None: client = Mock(spec=PluginModelClient) schema = _build_model_schema() diff --git a/api/tests/unit_tests/core/plugin/test_plugin_entities.py b/api/tests/unit_tests/core/plugin/test_plugin_entities.py index f1c4c7e700..deac0ba1da 100644 --- a/api/tests/unit_tests/core/plugin/test_plugin_entities.py +++ b/api/tests/unit_tests/core/plugin/test_plugin_entities.py @@ -5,6 +5,7 @@ from enum import StrEnum import pytest from flask import Response from pydantic import ValidationError +from pytest_mock import MockerFixture from core.plugin.entities.endpoint import EndpointEntityWithInstance from core.plugin.entities.marketplace import 
MarketplacePluginDeclaration, MarketplacePluginSnapshot @@ -34,7 +35,7 @@ from graphon.model_runtime.entities.message_entities import ( class TestEndpointEntity: - def test_endpoint_entity_with_instance_renders_url(self, mocker): + def test_endpoint_entity_with_instance_renders_url(self, mocker: MockerFixture): mocker.patch("core.plugin.entities.endpoint.dify_config.ENDPOINT_URL_TEMPLATE", "https://dify.test/{hook_id}") now = datetime.datetime.now(datetime.UTC) diff --git a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py index 1b114b369a..1f46634b89 100644 --- a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py +++ b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py @@ -1,5 +1,7 @@ from uuid import uuid4 +from pytest_mock import MockerFixture + from constants import UUID_NIL from core.prompt.utils.extract_thread_messages import extract_thread_messages from core.prompt.utils.get_thread_messages_length import get_thread_messages_length @@ -103,7 +105,7 @@ def test_extract_thread_messages_breaks_when_parent_is_none(): assert result[0].id == id2 -def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): +def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer=""), # newest generated message should be excluded @@ -119,7 +121,7 @@ def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): mock_scalars.assert_called_once() -def test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker): +def test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer="latest-answer"), diff --git a/api/tests/unit_tests/core/prompt/test_prompt_transform.py 
b/api/tests/unit_tests/core/prompt/test_prompt_transform.py index 5308c8e7b3..3d71e73496 100644 --- a/api/tests/unit_tests/core/prompt/test_prompt_transform.py +++ b/api/tests/unit_tests/core/prompt/test_prompt_transform.py @@ -209,7 +209,7 @@ class TestPromptTransform: assert result == ["only"] memory.get_history_prompt_messages.assert_called_with(max_token_limit=10, message_limit=None) - def test_append_chat_histories_extends_prompt_messages(self, monkeypatch): + def test_append_chat_histories_extends_prompt_messages(self, monkeypatch: pytest.MonkeyPatch): transform = PromptTransform() memory = MagicMock() memory_config = SimpleNamespace(window=SimpleNamespace(enabled=False, size=None)) diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py index 1e91c2dd88..e233bd2ef0 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py @@ -67,7 +67,7 @@ def _dataset(dataset_keyword_table=None, keyword_number=None): @pytest.fixture -def patched_runtime(monkeypatch): +def patched_runtime(monkeypatch: pytest.MonkeyPatch): session = MagicMock() db = SimpleNamespace(session=session) storage = MagicMock() @@ -151,7 +151,7 @@ def test_add_texts_without_keywords_list_always_uses_extractor(monkeypatch, patc assert set(keyword._update_segment_keywords.call_args.args[2]) == {"from-extractor"} -def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch): +def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value=None)) @@ -308,7 +308,7 @@ def test_add_and_delete_ids_from_keyword_table_helpers(): assert deleted["kw2"] == {"node-2"} -def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch): 
+def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) handler = MagicMock() handler.extract_keywords.return_value = ["kw-a", "kw-b"] @@ -350,7 +350,7 @@ def test_update_segment_keywords_updates_when_segment_exists(monkeypatch, patche patched_runtime.session.commit.assert_not_called() -def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): +def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value={})) monkeypatch.setattr(keyword, "_update_segment_keywords", MagicMock()) @@ -365,7 +365,7 @@ def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): keyword._save_dataset_keyword_table.assert_called_once() -def test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch): +def test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table(), keyword_number=2)) handler = MagicMock() handler.extract_keywords.return_value = {"auto"} diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py index a4586c141b..c8ee75bf43 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py @@ -2,6 +2,8 @@ import sys import types from types import SimpleNamespace +import pytest + from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler from core.rag.datasource.keyword.jieba.stopwords import STOPWORDS @@ -38,7 +40,7 @@ def 
_install_fake_jieba_modules( monkeypatch.delitem(sys.modules, "jieba.analyse.tfidf", raising=False) -def test_init_uses_existing_default_tfidf(monkeypatch): +def test_init_uses_existing_default_tfidf(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") default_tfidf = _DummyTFIDF() analyse_module.default_tfidf = default_tfidf @@ -51,7 +53,7 @@ def test_init_uses_existing_default_tfidf(monkeypatch): assert handler._tfidf.stop_words == STOPWORDS -def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): +def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -67,7 +69,7 @@ def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): +def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -85,7 +87,7 @@ def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): +def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None _install_fake_jieba_modules(monkeypatch, analyse_module) @@ -96,7 +98,7 @@ def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): assert fallback_keywords == ["two"] -def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): +def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") 
_install_fake_jieba_modules(monkeypatch, analyse_module, jieba_attrs={"lcut": lambda _: ["x", "x", "y"]}) @@ -105,7 +107,7 @@ def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): assert tfidf.extract_tags("ignored", topK=1) == ["x"] -def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch): +def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") _install_fake_jieba_modules( monkeypatch, diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py index 0d969a3270..e1765b17cb 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py @@ -10,7 +10,7 @@ from core.rag.datasource.keyword.keyword_type import KeyWordType from core.rag.models.document import Document -def test_get_keyword_factory_returns_jieba_factory(monkeypatch): +def test_get_keyword_factory_returns_jieba_factory(monkeypatch: pytest.MonkeyPatch): fake_module = types.ModuleType("core.rag.datasource.keyword.jieba.jieba") class FakeJieba: @@ -27,7 +27,7 @@ def test_get_keyword_factory_raises_for_unsupported_type(): Keyword.get_keyword_factory("unsupported") -def test_keyword_initialization_uses_configured_factory(monkeypatch): +def test_keyword_initialization_uses_configured_factory(monkeypatch: pytest.MonkeyPatch): dataset = SimpleNamespace(id="dataset-1") fake_processor = MagicMock() diff --git a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py index b0ecad4d0c..d38213dd89 100644 --- a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py +++ b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py @@ -182,7 +182,7 @@ class TestRetrievalServiceInternals: 
app.app_context.return_value.__exit__.return_value = False return app - def test_retrieve_with_attachment_ids_only(self, monkeypatch, internal_dataset): + def test_retrieve_with_attachment_ids_only(self, monkeypatch: pytest.MonkeyPatch, internal_dataset): with ( patch("core.rag.datasource.retrieval_service.RetrievalService._get_dataset", return_value=internal_dataset), patch("core.rag.datasource.retrieval_service.RetrievalService._retrieve") as mock_retrieve, @@ -699,7 +699,9 @@ class TestRetrievalServiceInternals: assert RetrievalService.format_retrieval_documents(documents) == [] - def test_format_retrieval_documents_with_parent_child_summary_and_attachments(self, monkeypatch): + def test_format_retrieval_documents_with_parent_child_summary_and_attachments( + self, monkeypatch: pytest.MonkeyPatch + ): dataset_doc_parent = SimpleNamespace( id="doc-parent", doc_form=IndexStructureType.PARENT_CHILD_INDEX, @@ -877,7 +879,7 @@ class TestRetrievalServiceInternals: assert result_by_segment_id["segment-parent-summary"].summary == "summary for parent" assert result_by_segment_id["segment-parent-summary"].child_chunks == [] - def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch): + def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch: pytest.MonkeyPatch): rollback = Mock() monkeypatch.setattr(retrieval_service_module.db.session, "rollback", rollback) monkeypatch.setattr(retrieval_service_module.db.session, "scalars", Mock(side_effect=RuntimeError("db error"))) @@ -936,7 +938,7 @@ class TestRetrievalServiceInternals: future_ok.cancel.assert_called() def test_retrieve_internal_raises_value_error_when_exceptions_exist( - self, monkeypatch, internal_dataset, internal_flask_app + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) @@ -958,7 +960,9 
@@ class TestRetrievalServiceInternals: query="query", ) - def test_retrieve_internal_hybrid_weighted_attachment_flow(self, monkeypatch, internal_dataset, internal_flask_app): + def test_retrieve_internal_hybrid_weighted_attachment_flow( + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app + ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) monkeypatch.setattr( diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py index 7b6ee97f1c..067159398d 100644 --- a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py @@ -102,7 +102,9 @@ def test_gen_index_struct_dict(vector_factory_module): ("HOLOGRES", "dify_vdb_hologres.hologres_vector", "HologresVectorFactory"), ], ) -def test_get_vector_factory_supported(vector_factory_module, monkeypatch, vector_type, module_path, class_name): +def test_get_vector_factory_supported( + vector_factory_module, monkeypatch: pytest.MonkeyPatch, vector_type, module_path, class_name +): expected_cls = _register_fake_factory_module(monkeypatch, module_path, class_name) result_cls = vector_factory_module.Vector.get_vector_factory(getattr(vector_factory_module.VectorType, vector_type)) @@ -119,7 +121,7 @@ class _PluginChromaFactory: """Stub used only for entry-point override test.""" -def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, monkeypatch): +def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, monkeypatch: pytest.MonkeyPatch): from importlib.metadata import EntryPoint from core.rag.datasource.vdb import vector_backend_registry as reg @@ -171,7 +173,7 @@ def test_vector_init_uses_default_and_custom_attributes(vector_factory_module): assert default_vector._vector_processor == "processor" 
-def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch): +def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch: pytest.MonkeyPatch): """``Vector(dataset)`` must not transitively call ``ModelManager`` during construction. The real embedding model should only be materialized on the first ``embed_*`` call (i.e. create / search paths) so cleanup paths @@ -214,7 +216,7 @@ def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_m inner_model.embed_documents.assert_called_once_with(["world"]) -def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch): +def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch: pytest.MonkeyPatch): calls = {"vector_type": None, "init_args": None} class _Factory: @@ -242,7 +244,7 @@ def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeyp assert calls["init_args"] == (vector._dataset, ["doc_id"], "embeddings") -def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch): +def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Expr: def __eq__(self, _other): return "expr" @@ -279,7 +281,7 @@ def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch) assert calls["vector_type"] == vector_factory_module.VectorType.TIDB_ON_QDRANT -def test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch): +def test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE", None) monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE_WHITELIST_ENABLE", False) @@ -343,7 +345,7 @@ def test_create_skips_empty_text_documents_before_embedding(vector_factory_modul vector._vector_processor.create.assert_not_called() -def 
test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch): +def test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Field: def in_(self, value): return value @@ -484,7 +486,7 @@ def test_vector_delegation_methods(vector_factory_module): vector._vector_processor.delete_by_metadata_field.assert_called_once_with("doc_id", "doc-1") -def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch): +def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch: pytest.MonkeyPatch): vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) vector._embeddings = MagicMock() vector._vector_processor = MagicMock() @@ -507,7 +509,7 @@ def test_search_by_file_handles_missing_and_existing_upload(vector_factory_modul assert payload["file_id"] == "file-2" -def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch): +def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch: pytest.MonkeyPatch): delete_mock = MagicMock() redis_delete = MagicMock() monkeypatch.setattr(vector_factory_module.redis_client, "delete", redis_delete) @@ -526,7 +528,7 @@ def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, redis_delete.assert_not_called() -def test_get_embeddings_builds_cache_embedding(vector_factory_module, monkeypatch): +def test_get_embeddings_builds_cache_embedding(vector_factory_module, monkeypatch: pytest.MonkeyPatch): model_manager = MagicMock() model_manager.get_model_instance.return_value = "model-instance" diff --git a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py index e6a06f163e..2e1c5715c2 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py @@ -39,7 
+39,7 @@ class TestCSVExtractor: with pytest.raises(ValueError, match="Source column 'missing_col' not found"): extractor.extract() - def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch): + def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=False) def raise_decode(*args, **kwargs): @@ -50,7 +50,7 @@ class TestCSVExtractor: with pytest.raises(RuntimeError, match="Error loading dummy.csv"): extractor.extract() - def test_extract_autodetect_encoding_success(self, monkeypatch): + def test_extract_autodetect_encoding_success(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) attempted_encodings: list[str | None] = [] @@ -75,7 +75,7 @@ class TestCSVExtractor: assert docs[0].page_content == "id: source-1;body: hello" assert attempted_encodings == [None, "bad", "utf-8"] - def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch): + def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) def always_raise(*args, **kwargs): @@ -86,7 +86,7 @@ class TestCSVExtractor: assert extractor.extract() == [] - def test_read_from_file_re_raises_csv_error(self, monkeypatch): + def test_read_from_file_re_raises_csv_error(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv") monkeypatch.setattr(pd, "read_csv", lambda *args, **kwargs: (_ for _ in ()).throw(csv.Error("bad csv"))) diff --git a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py index d2bcc1e2c4..2b42adc716 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py @@ -45,7 +45,7 @@ class _FakeWorkbook: class 
TestExcelExtractor: - def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch): + def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch: pytest.MonkeyPatch): sheet_with_data = _FakeSheet( header_rows=[("Name", "Link")], data_rows=[ @@ -68,7 +68,7 @@ class TestExcelExtractor: assert docs[1].page_content == '"Name":"";"Link":"123"' assert all(doc.metadata["source"] == "/tmp/sample.xlsx" for doc in docs) - def test_extract_xls_path(self, monkeypatch): + def test_extract_xls_path(self, monkeypatch: pytest.MonkeyPatch): class FakeExcelFile: sheet_names = ["Sheet1"] diff --git a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py index 5beed88971..b4b08f57ec 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py @@ -56,7 +56,7 @@ def _patch_all_extractors(monkeypatch) -> _ExtractorFactory: class TestExtractProcessorLoaders: - def test_load_from_upload_file_return_docs_and_text(self, monkeypatch): + def test_load_from_upload_file_return_docs_and_text(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: SimpleNamespace(**kwargs)) monkeypatch.setattr( @@ -93,7 +93,9 @@ class TestExtractProcessorLoaders: ), ], ) - def test_load_from_url_builds_temp_file_with_correct_suffix(self, monkeypatch, url, headers, expected_suffix): + def test_load_from_url_builds_temp_file_with_correct_suffix( + self, monkeypatch: pytest.MonkeyPatch, url, headers, expected_suffix + ): response = SimpleNamespace(headers=headers, content=b"body") monkeypatch.setattr(processor_module.ssrf_proxy, "get", lambda *args, **kwargs: response) monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: SimpleNamespace(**kwargs)) @@ -119,11 +121,13 @@ class TestExtractProcessorLoaders: class TestExtractProcessorFileRouting: 
@pytest.fixture(autouse=True) - def _set_unstructured_config(self, monkeypatch): + def _set_unstructured_config(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_URL", "https://unstructured") monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_KEY", "key") - def _run_extract_for_extension(self, monkeypatch, extension: str, etl_type: str, is_automatic: bool = False): + def _run_extract_for_extension( + self, monkeypatch: pytest.MonkeyPatch, extension: str, etl_type: str, is_automatic: bool = False + ): factory = _patch_all_extractors(monkeypatch) monkeypatch.setattr(processor_module.dify_config, "ETL_TYPE", etl_type) @@ -167,7 +171,7 @@ class TestExtractProcessorFileRouting: ], ) def test_extract_routes_file_extensions_for_unstructured_mode( - self, monkeypatch, extension, expected_extractor, is_automatic + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor, is_automatic ): extractor_name, args, kwargs = self._run_extract_for_extension( monkeypatch, extension, etl_type="Unstructured", is_automatic=is_automatic @@ -189,7 +193,9 @@ class TestExtractProcessorFileRouting: (".txt", "TextExtractor"), ], ) - def test_extract_routes_file_extensions_for_default_mode(self, monkeypatch, extension, expected_extractor): + def test_extract_routes_file_extensions_for_default_mode( + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor + ): extractor_name, _, _ = self._run_extract_for_extension(monkeypatch, extension, etl_type="SelfHosted") assert extractor_name == expected_extractor @@ -202,7 +208,7 @@ class TestExtractProcessorFileRouting: class TestExtractProcessorDatasourceRouting: - def test_extract_routes_notion_datasource(self, monkeypatch): + def test_extract_routes_notion_datasource(self, monkeypatch: pytest.MonkeyPatch): factory = _patch_all_extractors(monkeypatch) notion_info = SimpleNamespace( @@ -228,7 +234,9 @@ class TestExtractProcessorDatasourceRouting: 
("jinareader", "JinaReaderWebExtractor"), ], ) - def test_extract_routes_website_datasource_providers(self, monkeypatch, provider: str, expected: str): + def test_extract_routes_website_datasource_providers( + self, monkeypatch: pytest.MonkeyPatch, provider: str, expected: str + ): factory = _patch_all_extractors(monkeypatch) website_info = SimpleNamespace( diff --git a/api/tests/unit_tests/core/rag/extractor/test_helpers.py b/api/tests/unit_tests/core/rag/extractor/test_helpers.py index 74387f749d..1c6f97ec53 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_helpers.py +++ b/api/tests/unit_tests/core/rag/extractor/test_helpers.py @@ -21,7 +21,7 @@ class TestHelpers: # Assert the language field for full coverage assert encodings[0].language is not None - def test_detect_file_encodings_timeout(self, monkeypatch): + def test_detect_file_encodings_timeout(self, monkeypatch: pytest.MonkeyPatch): class FakeFuture: def result(self, timeout=None): raise helpers.concurrent.futures.TimeoutError() @@ -41,7 +41,7 @@ class TestHelpers: with pytest.raises(TimeoutError, match="Timeout reached while detecting encoding"): detect_file_encodings("file.txt", timeout=1) - def test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch): + def test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch: pytest.MonkeyPatch): class FakeResult: encoding = None coherence = 0.0 diff --git a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py index 7e78c86c7d..8ede44ec04 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py @@ -74,7 +74,7 @@ after assert "[link]" not in tups[1][1] assert "img.png" not in tups[1][1] - def test_parse_tups_autodetects_encoding_after_decode_error(self, monkeypatch): + def test_parse_tups_autodetects_encoding_after_decode_error(self, 
monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=True) calls: list[str | None] = [] @@ -99,7 +99,7 @@ after assert len(tups) == 2 assert calls == [None, "bad-encoding", "utf-8"] - def test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch): + def test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=False) def raise_decode(self, encoding=None): @@ -110,7 +110,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch): + def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path") def raise_other(self, encoding=None): @@ -121,7 +121,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch): + def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path") monkeypatch.setattr(extractor, "parse_tups", lambda _: [(None, "plain"), ("Header", "value")]) diff --git a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py index 808e41867e..49f7b592dc 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py @@ -28,7 +28,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "token" - def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, monkeypatch): + def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, 
monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -46,7 +46,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "env-token" - def test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch): + def test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -63,7 +63,7 @@ class TestNotionExtractorInitAndPublicMethods: credential_id="cred", ) - def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch): + def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -83,7 +83,7 @@ class TestNotionExtractorInitAndPublicMethods: load_mock.assert_called_once_with("obj", "page") assert len(docs) == 1 - def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch): + def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -394,7 +394,7 @@ class TestNotionMetadataAndCredentialMethods: assert extractor.update_last_edited_time(None) is None - def test_update_last_edited_time_updates_document_and_commits(self, monkeypatch): + def test_update_last_edited_time_updates_document_and_commits(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -479,7 +479,7 @@ class TestNotionMetadataAndCredentialMethods: with pytest.raises(AssertionError, match="Notion access token is required"): extractor.get_notion_last_edited_time() - def test_get_access_token_success_and_errors(self, monkeypatch): + def test_get_access_token_success_and_errors(self, monkeypatch: pytest.MonkeyPatch): with 
pytest.raises(Exception, match="No credential id found"): notion_extractor.NotionExtractor._get_access_token("tenant", None) diff --git a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py index 47222a23a2..f2caf02d5e 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py @@ -7,7 +7,7 @@ import core.rag.extractor.pdf_extractor as pe @pytest.fixture -def mock_dependencies(monkeypatch): +def mock_dependencies(monkeypatch: pytest.MonkeyPatch): # Mock storage saves = [] @@ -61,7 +61,9 @@ def mock_dependencies(monkeypatch): (b"\x89PNG\r\n\x1a\n some png", "image/png", "png", "test_file_id_png"), ], ) -def test_extract_images_formats(mock_dependencies, monkeypatch, image_bytes, expected_mime, expected_ext, file_id): +def test_extract_images_formats( + mock_dependencies, monkeypatch: pytest.MonkeyPatch, image_bytes, expected_mime, expected_ext, file_id +): saves = mock_dependencies.saves db_stub = mock_dependencies.db @@ -122,7 +124,7 @@ def test_extract_images_get_objects_scenarios(mock_dependencies, get_objects_sid assert result == "" -def test_extract_calls_extract_images(mock_dependencies, monkeypatch): +def test_extract_calls_extract_images(mock_dependencies, monkeypatch: pytest.MonkeyPatch): # Mock pypdfium2 mock_pdf_doc = MagicMock() mock_page = MagicMock() diff --git a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py index fb3c6e52c6..71046d73af 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py @@ -19,7 +19,7 @@ class TestTextExtractor: assert docs[0].page_content == "hello world" assert docs[0].metadata == {"source": str(file_path)} - def test_extract_autodetect_success_after_decode_error(self, monkeypatch): + def 
test_extract_autodetect_success_after_decode_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) calls = [] @@ -44,7 +44,7 @@ class TestTextExtractor: assert docs[0].page_content == "decoded text" assert calls == [None, "bad", "utf-8"] - def test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch): + def test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) def always_decode_error(self, encoding=None): @@ -56,7 +56,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="all detected encodings failed"): extractor.extract() - def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch): + def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=False) def always_decode_error(self, encoding=None): @@ -67,7 +67,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="specified encoding failed"): extractor.extract() - def test_extract_wraps_non_decode_exceptions(self, monkeypatch): + def test_extract_wraps_non_decode_exceptions(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt") def raise_other(self, encoding=None): diff --git a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py index b9f2449cfb..513d232d7f 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py @@ -61,7 +61,7 @@ def test_parse_row(): assert extractor._parse_row(row, {}, 3) == gt[idx] -def test_init_downloads_via_ssrf_proxy(monkeypatch): +def test_init_downloads_via_ssrf_proxy(monkeypatch: pytest.MonkeyPatch): doc = Document() doc.add_paragraph("hello") buf = io.BytesIO() 
@@ -97,7 +97,7 @@ def test_init_downloads_via_ssrf_proxy(monkeypatch): extractor.temp_file.close() -def test_extract_images_from_docx(monkeypatch): +def test_extract_images_from_docx(monkeypatch: pytest.MonkeyPatch): external_bytes = b"ext-bytes" internal_bytes = b"int-bytes" @@ -210,7 +210,7 @@ def test_extract_images_from_docx_uses_internal_files_url(): dify_config.INTERNAL_FILES_URL = original_internal_files_url -def test_extract_hyperlinks(monkeypatch): +def test_extract_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage to avoid issues during image extraction (even if no images are present) monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -255,7 +255,7 @@ def test_extract_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_extract_legacy_hyperlinks(monkeypatch): +def test_extract_legacy_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -317,7 +317,7 @@ def test_extract_legacy_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_init_rejects_invalid_url_status(monkeypatch): +def test_init_rejects_invalid_url_status(monkeypatch: pytest.MonkeyPatch): class FakeResponse: status_code = 404 content = b"" @@ -392,7 +392,7 @@ def test_close_closes_awaitable_close_result(): extractor.temp_file.close.assert_called_once() -def test_extract_images_handles_invalid_external_cases(monkeypatch): +def test_extract_images_handles_invalid_external_cases(monkeypatch: pytest.MonkeyPatch): class FakeTargetRef: def __contains__(self, item): return item == "image" @@ -437,7 +437,7 @@ def test_extract_images_handles_invalid_external_cases(monkeypatch): db_stub.session.commit.assert_called_once() -def test_table_to_markdown_and_parse_helpers(monkeypatch): +def 
test_table_to_markdown_and_parse_helpers(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) table = SimpleNamespace( @@ -500,7 +500,7 @@ def test_table_to_markdown_and_parse_helpers(monkeypatch): assert extractor._parse_cell(cell, image_map) == "EXT-IMGINT-IMGplain" -def test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch): +def test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) ext_image_id = "ext-image" diff --git a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py index 26ce333e11..19fb385a6d 100644 --- a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py +++ b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py @@ -45,7 +45,7 @@ def _install_chunk_by_title(monkeypatch: pytest.MonkeyPatch, chunks: list[Simple class TestUnstructuredMarkdownMsgXml: - def test_markdown_extractor_without_api(self, monkeypatch): + def test_markdown_extractor_without_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" chunk-1 "), SimpleNamespace(text=" chunk-2 ")]) _register_module( monkeypatch, "unstructured.partition.md", partition_md=lambda filename: [SimpleNamespace(text="x")] @@ -55,7 +55,7 @@ class TestUnstructuredMarkdownMsgXml: assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_markdown_extractor_with_api(self, monkeypatch): + def test_markdown_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" via-api ")]) calls = {} @@ -70,7 +70,7 @@ class TestUnstructuredMarkdownMsgXml: assert docs[0].page_content == "via-api" assert calls == {"filename": "/tmp/file.md", "api_url": "https://u", "api_key": 
"k"} - def test_msg_extractor_local(self, monkeypatch): + def test_msg_extractor_local(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) _register_module( monkeypatch, "unstructured.partition.msg", partition_msg=lambda filename: [SimpleNamespace(text="x")] @@ -78,7 +78,7 @@ class TestUnstructuredMarkdownMsgXml: assert UnstructuredMsgExtractor("/tmp/file.msg").extract()[0].page_content == "msg-doc" - def test_msg_extractor_with_api(self, monkeypatch): + def test_msg_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) calls = {} @@ -94,7 +94,7 @@ class TestUnstructuredMarkdownMsgXml: ) assert calls["filename"] == "/tmp/file.msg" - def test_xml_extractor_local_and_api(self, monkeypatch): + def test_xml_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="xml-doc")]) xml_calls = {} @@ -124,7 +124,7 @@ class TestUnstructuredMarkdownMsgXml: class TestUnstructuredEmailAndEpub: - def test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch): + def test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) captured = {} @@ -150,7 +150,7 @@ class TestUnstructuredEmailAndEpub: assert "Hello Email" in chunk_elements[0].text assert chunk_elements[1].text == bad_base64 - def test_email_extractor_with_api(self, monkeypatch): + def test_email_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="api-email")]) _register_module( monkeypatch, @@ -162,7 +162,7 @@ class TestUnstructuredEmailAndEpub: assert docs[0].page_content == "api-email" - def test_epub_extractor_local_and_api(self, monkeypatch): + def test_epub_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): 
_install_chunk_by_title(monkeypatch, [SimpleNamespace(text="epub-doc")]) calls = {"download": 0, "partition": 0} @@ -198,7 +198,7 @@ class TestUnstructuredPPTAndPPTX: with pytest.raises(NotImplementedError, match="Unstructured API Url is not configured"): UnstructuredPPTExtractor("/tmp/file.ppt").extract() - def test_ppt_extractor_groups_text_by_page(self, monkeypatch): + def test_ppt_extractor_groups_text_by_page(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -215,7 +215,7 @@ class TestUnstructuredPPTAndPPTX: assert [doc.page_content for doc in docs] == ["A\nB", "C"] - def test_pptx_extractor_local_and_api(self, monkeypatch): + def test_pptx_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -244,7 +244,7 @@ class TestUnstructuredPPTAndPPTX: class TestUnstructuredWord: - def _install_doc_modules(self, monkeypatch, version: str, filetype_value): + def _install_doc_modules(self, monkeypatch: pytest.MonkeyPatch, version: str, filetype_value): _register_unstructured_packages(monkeypatch) class FileType: @@ -276,13 +276,13 @@ class TestUnstructuredWord: ], ) - def test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch): + def test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="doc") with pytest.raises(ValueError, match="Partitioning .doc files is only supported"): UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() - def test_word_extractor_doc_and_docx_paths(self, monkeypatch): + def test_word_extractor_doc_and_docx_paths(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.11", filetype_value="doc") docs = UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() @@ -292,7 +292,7 @@ class 
TestUnstructuredWord: docs = UnstructuredWordExtractor("/tmp/file.docx", "https://u", "k").extract() assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch): + def test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="not-used") monkeypatch.setitem(sys.modules, "magic", None) diff --git a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py index d758be218a..95878fc688 100644 --- a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py +++ b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py @@ -59,7 +59,7 @@ class TestWaterCrawlExceptions: class TestBaseAPIClient: - def test_init_session_builds_expected_headers(self, monkeypatch): + def test_init_session_builds_expected_headers(self, monkeypatch: pytest.MonkeyPatch): captured = {} def fake_client(**kwargs): @@ -74,7 +74,7 @@ class TestBaseAPIClient: assert captured["headers"]["X-API-Key"] == "k" assert captured["headers"]["User-Agent"] == "WaterCrawl-Plugin" - def test_request_stream_and_non_stream_paths(self, monkeypatch): + def test_request_stream_and_non_stream_paths(self, monkeypatch: pytest.MonkeyPatch): class FakeSession: def __init__(self): self.request_calls = [] @@ -106,7 +106,7 @@ class TestBaseAPIClient: assert fake_session.build_calls assert fake_session.send_calls[0][1] is True - def test_http_method_helpers_delegate_to_request(self, monkeypatch): + def test_http_method_helpers_delegate_to_request(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(BaseAPIClient, "init_session", lambda self: MagicMock()) client = BaseAPIClient(api_key="k", base_url="https://watercrawl.dev") @@ -127,7 +127,7 @@ class TestBaseAPIClient: class TestWaterCrawlAPIClient: - def 
test_process_eventstream_and_download(self, monkeypatch): + def test_process_eventstream_and_download(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = MagicMock() @@ -174,7 +174,7 @@ class TestWaterCrawlAPIClient: client.process_response(_response(200, content_type="application/octet-stream", content=b"bin")) == b"bin" ) - def test_process_response_event_stream_returns_generator(self, monkeypatch): + def test_process_response_event_stream_returns_generator(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") generator = (item for item in [{"type": "result", "data": {}}]) monkeypatch.setattr(client, "process_eventstream", lambda response, download=False: generator) @@ -193,7 +193,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(RuntimeError, match="http error"): client.process_response(response) - def test_endpoint_wrappers(self, monkeypatch): + def test_endpoint_wrappers(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "process_response", lambda resp: "processed") @@ -208,7 +208,7 @@ class TestWaterCrawlAPIClient: assert client.download_crawl_request("id") == "processed" assert client.get_crawl_request_results("id") == "processed" - def test_monitor_crawl_request_generator_and_validation(self, monkeypatch): + def test_monitor_crawl_request_generator_and_validation(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "process_response", lambda _: (x for x in [{"type": "result", "data": 1}])) @@ -221,7 +221,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(ValueError, match="Generator expected"): list(client.monitor_crawl_request("job-1")) - def test_scrape_url_sync_and_async(self, monkeypatch): + def test_scrape_url_sync_and_async(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "create_crawl_request", 
lambda **kwargs: {"uuid": "job-1"}) @@ -238,7 +238,7 @@ class TestWaterCrawlAPIClient: sync_result = client.scrape_url("https://example.com", sync=True) assert sync_result == {"url": "https://example.com"} - def test_download_result_fetches_json_and_closes(self, monkeypatch): + def test_download_result_fetches_json_and_closes(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = _response(200, {"markdown": "body"}) @@ -251,7 +251,7 @@ class TestWaterCrawlAPIClient: class TestWaterCrawlProvider: - def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch): + def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") captured_kwargs = {} @@ -290,7 +290,7 @@ class TestWaterCrawlProvider: assert captured_kwargs["page_options"]["only_main_content"] is False assert captured_kwargs["page_options"]["wait_time"] == 1000 - def test_get_crawl_status_active_and_completed(self, monkeypatch): + def test_get_crawl_status_active_and_completed(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( @@ -327,7 +327,7 @@ class TestWaterCrawlProvider: assert completed["status"] == "completed" assert completed["data"] == [{"url": "u"}] - def test_get_crawl_url_data_and_scrape(self, monkeypatch): + def test_get_crawl_url_data_and_scrape(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr(provider, "scrape_url", lambda url: {"source_url": url}) @@ -339,7 +339,7 @@ class TestWaterCrawlProvider: monkeypatch.setattr(provider, "_get_results", lambda job_id, query_params=None: iter([])) assert provider.get_crawl_url_data("job", "u1") is None - def test_structure_data_validation_and_get_results_pagination(self, monkeypatch): + def test_structure_data_validation_and_get_results_pagination(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") 
with pytest.raises(ValueError, match="Invalid result object"): @@ -380,7 +380,7 @@ class TestWaterCrawlProvider: assert len(results) == 1 assert results[0]["source_url"] == "https://a" - def test_scrape_url_uses_client_and_structure(self, monkeypatch): + def test_scrape_url_uses_client_and_structure(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( provider.client, "scrape_url", lambda **kwargs: {"result": {"metadata": {}, "markdown": "m"}, "url": "u"} @@ -392,7 +392,7 @@ class TestWaterCrawlProvider: class TestWaterCrawlWebExtractor: - def test_extract_crawl_and_scrape_modes(self, monkeypatch): + def test_extract_crawl_and_scrape_modes(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: { @@ -418,7 +418,7 @@ class TestWaterCrawlWebExtractor: assert crawl_extractor.extract()[0].page_content == "crawl" assert scrape_extractor.extract()[0].page_content == "scrape" - def test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch): + def test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: None, diff --git a/api/tests/unit_tests/core/telemetry/test_facade.py b/api/tests/unit_tests/core/telemetry/test_facade.py index 36e8e1bbb1..95d653f55b 100644 --- a/api/tests/unit_tests/core/telemetry/test_facade.py +++ b/api/tests/unit_tests/core/telemetry/test_facade.py @@ -14,7 +14,7 @@ from core.telemetry.events import TelemetryContext, TelemetryEvent @pytest.fixture -def telemetry_test_setup(monkeypatch): +def telemetry_test_setup(monkeypatch: pytest.MonkeyPatch): module_name = "core.ops.ops_trace_manager" ops_stub = types.ModuleType(module_name) diff --git 
a/api/tests/unit_tests/core/test_provider_manager.py b/api/tests/unit_tests/core/test_provider_manager.py index 02f12fb3b4..e84fcba3d9 100644 --- a/api/tests/unit_tests/core/test_provider_manager.py +++ b/api/tests/unit_tests/core/test_provider_manager.py @@ -289,7 +289,7 @@ def test_get_default_model_uses_injected_runtime_for_existing_default_record(moc result = manager.get_default_model("tenant-id", ModelType.LLM) - mock_factory_cls.assert_called_once_with(model_runtime=manager._model_runtime) + mock_factory_cls.assert_called_once_with(runtime=manager._model_runtime) assert result is not None assert result.model == "gpt-4" assert result.provider.provider == "openai" @@ -316,7 +316,7 @@ def test_get_configurations_uses_injected_runtime_and_adds_provider_aliases(mock result = manager.get_configurations("tenant-id") expected_alias = str(ModelProviderID("openai")) - mock_factory_cls.assert_called_once_with(model_runtime=manager._model_runtime) + mock_factory_cls.assert_called_once_with(runtime=manager._model_runtime) assert result.tenant_id == "tenant-id" assert expected_alias in provider_records assert expected_alias in provider_model_records @@ -402,7 +402,7 @@ def test_get_configurations_reuses_cached_result_for_same_tenant(mocker: MockerF assert first is second mock_get_all_providers.assert_called_once_with("tenant-id") - mock_factory_cls.assert_called_once_with(model_runtime=manager._model_runtime) + mock_factory_cls.assert_called_once_with(runtime=manager._model_runtime) mock_provider_configuration.assert_called_once() provider_configuration.bind_model_runtime.assert_called_once_with(manager._model_runtime) diff --git a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py index ad6d5906ae..b21a5c3e24 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py @@ -78,7 +78,7 @@ def _tool_yaml() -> dict[str, 
Any]: } -def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch): +def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch: pytest.MonkeyPatch): yaml_payloads = [_provider_yaml(), _tool_yaml()] def _load_yaml(*args, **kwargs): diff --git a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py index c7829fc0d7..3f6b1ec154 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py @@ -115,7 +115,7 @@ def test_weekday_tool(): list(weekday_tool.invoke(user_id="u", tool_parameters={"year": 2024, "day": 1})) -def test_simple_code_valid_execution(monkeypatch): +def test_simple_code_valid_execution(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -138,7 +138,7 @@ def test_simple_code_invalid_language(): list(simple_code.invoke(user_id="u", tool_parameters={"language": "go", "code": "fmt.Println(1)"})) -def test_simple_code_execution_error(monkeypatch): +def test_simple_code_execution_error(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -155,14 +155,14 @@ def test_webscraper_empty_url(): assert empty == "Please input url" -def test_webscraper_fetch(monkeypatch): +def test_webscraper_fetch(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: "page") full = list(webscraper.invoke(user_id="u", tool_parameters={"url": "https://example.com"}))[0].message.text assert full == "page" -def test_webscraper_summary(monkeypatch): +def test_webscraper_summary(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: 
"page") monkeypatch.setattr(webscraper, "summary", lambda user_id, content: "summary") @@ -175,7 +175,7 @@ def test_webscraper_summary(monkeypatch): assert summarized == "summary" -def test_webscraper_fetch_error(monkeypatch): +def test_webscraper_fetch_error(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr( "core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", @@ -192,7 +192,7 @@ def test_asr_invalid_file(): assert "not a valid audio file" in invalid_file -def test_asr_valid_file_invocation(monkeypatch): +def test_asr_valid_file_invocation(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) model_instance = type("M", (), {"invoke_speech2text": lambda self, file: "transcript"})() model_manager = type("Mgr", (), {"get_model_instance": lambda *a, **k: model_instance})() @@ -209,7 +209,7 @@ def test_asr_valid_file_invocation(monkeypatch): assert captured_manager_kwargs == {"tenant_id": "tenant-1", "user_id": "u"} -def test_asr_available_models_and_runtime_parameters(monkeypatch): +def test_asr_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) provider_model = type("PM", (), {"provider": "p", "models": [type("Model", (), {"model": "m"})()]})() monkeypatch.setattr( @@ -220,7 +220,7 @@ def test_asr_available_models_and_runtime_parameters(monkeypatch): assert asr.get_runtime_parameters()[0].name == "model" -def test_tts_invoke_returns_messages(monkeypatch): +def test_tts_invoke_returns_messages(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) captured_manager_kwargs = {} voices_model_instance = type( @@ -280,7 +280,7 @@ def test_tts_tool_raises_when_voice_unavailable(monkeypatch, voices): list(tts.invoke(user_id="u", tool_parameters={"model": "p#m", "text": "hello"})) -def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): +def 
test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) model_1 = SimpleNamespace( @@ -307,7 +307,7 @@ def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): assert runtime_parameters[1].name == "voice#provider-a#model-a" -def test_provider_classes_and_builtin_sort(monkeypatch): +def test_provider_classes_and_builtin_sort(monkeypatch: pytest.MonkeyPatch): # Use object.__new__ to avoid YAML-loading __init__; only pass-through validation is exercised. # Ensure pass-through _validate_credentials methods are executed. AudioToolProvider._validate_credentials(object.__new__(AudioToolProvider), "u", {}) diff --git a/api/tests/unit_tests/core/tools/test_custom_tool.py b/api/tests/unit_tests/core/tools/test_custom_tool.py index f35546b025..f525baeaf2 100644 --- a/api/tests/unit_tests/core/tools/test_custom_tool.py +++ b/api/tests/unit_tests/core/tools/test_custom_tool.py @@ -47,7 +47,7 @@ def test_parsed_response_to_string(): assert ParsedResponse("ok", False).to_string() == "ok" -def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch): +def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch: pytest.MonkeyPatch): tool = _build_tool() forked = tool.fork_tool_runtime(ToolRuntime(tenant_id="tenant-2")) assert isinstance(forked, ApiTool) @@ -184,7 +184,7 @@ def test_get_parameter_value_and_type_conversion_helpers(): assert tool._convert_body_property_type({"anyOf": [{"type": "integer"}]}, "2") == 2 -def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch): +def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [ {"name": "id", "in": "path", "required": True, "schema": {"type": "string"}}, @@ -236,7 +236,7 @@ def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch invalid_method_tool.do_http_request("https://api.example.com", "TRACE", 
headers={}, parameters={}) -def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch): +def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [], "requestBody": { diff --git a/api/tests/unit_tests/core/tools/test_tool_manager.py b/api/tests/unit_tests/core/tools/test_tool_manager.py index c9b3dfb186..7c7d6eec2d 100644 --- a/api/tests/unit_tests/core/tools/test_tool_manager.py +++ b/api/tests/unit_tests/core/tools/test_tool_manager.py @@ -648,7 +648,7 @@ def test_list_default_builtin_providers_for_postgres_and_mysql(): assert providers == provider_records -def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch): +def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch: pytest.MonkeyPatch): hardcoded_controller = SimpleNamespace(entity=SimpleNamespace(identity=SimpleNamespace(name="hardcoded"))) plugin_controller = object.__new__(PluginToolProviderController) plugin_controller.entity = SimpleNamespace(identity=SimpleNamespace(name="plugin-provider")) diff --git a/api/tests/unit_tests/core/tools/utils/test_configuration.py b/api/tests/unit_tests/core/tools/utils/test_configuration.py index ae5638784c..9e179536de 100644 --- a/api/tests/unit_tests/core/tools/utils/test_configuration.py +++ b/api/tests/unit_tests/core/tools/utils/test_configuration.py @@ -4,6 +4,8 @@ from collections.abc import Generator from typing import Any from unittest.mock import patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom from core.helper.tool_parameter_cache import ToolParameterCache from core.tools.__base.tool import Tool @@ -110,7 +112,7 @@ def test_encrypt_tool_parameters(): assert encrypted["plain"] == "x" -def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch): +def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( @@ -139,7 +141,7 @@ def 
test_delete_tool_parameters_cache(): mock_delete.assert_called_once() -def test_configuration_manager_decrypt_suppresses_errors(monkeypatch): +def test_configuration_manager_decrypt_suppresses_errors(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( patch.object(ToolParameterCache, "get", return_value=None), diff --git a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py index 5f34135af4..354b395504 100644 --- a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py +++ b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py @@ -42,7 +42,7 @@ class _FakeToolFileManager: @pytest.fixture(autouse=True) -def _patch_tool_file_manager(monkeypatch): +def _patch_tool_file_manager(monkeypatch: pytest.MonkeyPatch): # Patch the manager used inside the transformer module monkeypatch.setattr(mt, "ToolFileManager", _FakeToolFileManager) # also ensure predictable URL generation (no need to patch; uses id and extension only) diff --git a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py index 6bb86ebe78..081b189745 100644 --- a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py +++ b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py @@ -34,7 +34,7 @@ def test_system_encrypter_raises_error_for_invalid_ciphertext(): encrypter.decrypt_params("not-base64") -def test_system_helpers_use_global_cached_instance(monkeypatch): +def test_system_helpers_use_global_cached_instance(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(encryption, "_encrypter", None) monkeypatch.setattr("core.tools.utils.system_encryption.dify_config.SECRET_KEY", "global-secret") diff --git a/api/tests/unit_tests/core/variables/test_segment_type.py b/api/tests/unit_tests/core/variables/test_segment_type.py index d4e862220a..baa2ac2dc7 100644 --- 
a/api/tests/unit_tests/core/variables/test_segment_type.py +++ b/api/tests/unit_tests/core/variables/test_segment_type.py @@ -233,7 +233,7 @@ class TestSegmentTypeAdditionalMethods: assert SegmentType.GROUP.is_valid([StringSegment(value="b")]) is True assert SegmentType.GROUP.is_valid(["not-segment"]) is False - def test_unreachable_assertion_branch(self, monkeypatch): + def test_unreachable_assertion_branch(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(SegmentType, "is_array_type", lambda self: False) with pytest.raises(AssertionError, match="unreachable"): diff --git a/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py b/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py index 5d6667257f..12c7f8113c 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py @@ -1,12 +1,11 @@ +import logging import threading from datetime import datetime from types import SimpleNamespace from unittest.mock import MagicMock, patch -from core.app.entities.app_invoke_entities import DifyRunContext, InvokeFrom, UserFrom from core.app.workflow.layers.llm_quota import LLMQuotaLayer from core.errors.error import QuotaExceededError -from core.model_manager import ModelInstance from graphon.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus from graphon.graph_engine.entities.commands import CommandType from graphon.graph_events import NodeRunSucceededEvent @@ -14,17 +13,7 @@ from graphon.model_runtime.entities.llm_entities import LLMUsage from graphon.node_events import NodeRunResult -def _build_dify_context() -> DifyRunContext: - return DifyRunContext( - tenant_id="tenant-id", - app_id="app-id", - user_id="user-id", - user_from=UserFrom.ACCOUNT, - invoke_from=InvokeFrom.DEBUGGER, - ) - - -def _build_succeeded_event() -> NodeRunSucceededEvent: +def _build_succeeded_event(*, provider: str = "openai", model_name: str = 
"gpt-4o") -> NodeRunSucceededEvent: return NodeRunSucceededEvent( id="execution-id", node_id="llm-node-id", @@ -32,113 +21,162 @@ def _build_succeeded_event() -> NodeRunSucceededEvent: start_at=datetime.now(), node_run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, - inputs={"question": "hello"}, + inputs={ + "question": "hello", + "model_provider": provider, + "model_name": model_name, + }, llm_usage=LLMUsage.empty_usage(), ), ) -def _build_wrapped_model_instance() -> tuple[SimpleNamespace, ModelInstance]: - raw_model_instance = ModelInstance.__new__(ModelInstance) - return SimpleNamespace(_model_instance=raw_model_instance), raw_model_instance +def _build_public_model_identity(*, provider: str = "openai", model_name: str = "gpt-4o") -> SimpleNamespace: + return SimpleNamespace(provider=provider, name=model_name) + + +def _build_node_data(*, model: SimpleNamespace | None = None) -> SimpleNamespace: + return SimpleNamespace( + error_strategy=None, + retry_config=SimpleNamespace(retry_enabled=False), + model=model, + ) + + +def _build_node(*, node_type: BuiltinNodeTypes = BuiltinNodeTypes.LLM) -> MagicMock: + node = MagicMock() + node.id = "node-id" + node.execution_id = "execution-id" + node.node_type = node_type + node.node_data = _build_node_data(model=_build_public_model_identity()) + node.model_instance = SimpleNamespace(provider="stale-provider", model_name="stale-model") + return node + + +class _RunnableQuotaNode: + id = "node-id" + execution_id = "execution-id" + node_type = BuiltinNodeTypes.LLM + title = "LLM node" + + def __init__(self, *, stop_event: threading.Event, node_data: SimpleNamespace | None = None) -> None: + self.node_data = node_data or _build_node_data(model=_build_public_model_identity()) + self.graph_runtime_state = SimpleNamespace(stop_event=stop_event) + self.original_run_called = False + + def _run(self) -> NodeRunResult: + self.original_run_called = True + return 
NodeRunResult(status=WorkflowNodeExecutionStatus.SUCCEEDED) def test_deduct_quota_called_for_successful_llm_node() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "llm-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.LLM - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance, raw_model_instance = _build_wrapped_model_instance() - + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.LLM) result_event = _build_succeeded_event() - with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota", autospec=True) as mock_deduct: + + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: layer.on_node_run_end(node=node, error=None, result_event=result_event) mock_deduct.assert_called_once_with( tenant_id="tenant-id", - model_instance=raw_model_instance, + provider="openai", + model="gpt-4o", usage=result_event.node_run_result.llm_usage, ) def test_deduct_quota_called_for_question_classifier_node() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "question-classifier-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.QUESTION_CLASSIFIER - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance, raw_model_instance = _build_wrapped_model_instance() + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.QUESTION_CLASSIFIER) + result_event = _build_succeeded_event(provider="anthropic", model_name="claude-3-7-sonnet") - result_event = _build_succeeded_event() - with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota", autospec=True) as mock_deduct: + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: layer.on_node_run_end(node=node, error=None, 
result_event=result_event) mock_deduct.assert_called_once_with( tenant_id="tenant-id", - model_instance=raw_model_instance, + provider="anthropic", + model="claude-3-7-sonnet", usage=result_event.node_run_result.llm_usage, ) def test_non_llm_node_is_ignored() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "start-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.START - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node._model_instance = object() - + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.START) result_event = _build_succeeded_event() - with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota", autospec=True) as mock_deduct: + + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: layer.on_node_run_end(node=node, error=None, result_event=result_event) mock_deduct.assert_not_called() -def test_quota_error_is_handled_in_layer() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "llm-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.LLM - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance = object() +def test_precheck_ignores_non_quota_node() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.START) - result_event = _build_succeeded_event() - with patch( - "core.app.workflow.layers.llm_quota.deduct_llm_quota", - autospec=True, - side_effect=ValueError("quota exceeded"), - ): - layer.on_node_run_end(node=node, error=None, result_event=result_event) + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + mock_check.assert_not_called() -def 
test_quota_deduction_exceeded_aborts_workflow_immediately() -> None: - layer = LLMQuotaLayer() +def test_quota_error_is_handled_in_layer(caplog) -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") stop_event = threading.Event() layer.command_channel = MagicMock() - node = MagicMock() - node.id = "llm-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.LLM - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance, _ = _build_wrapped_model_instance() + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + result_event = _build_succeeded_event() + + with ( + caplog.at_level(logging.ERROR, logger="core.app.workflow.layers.llm_quota"), + patch( + "core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", + autospec=True, + side_effect=ValueError("quota exceeded"), + ) as mock_deduct, + ): + layer.on_node_run_end(node=node, error=None, result_event=result_event) + + mock_deduct.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=result_event.node_run_result.llm_usage, + ) + assert "LLM quota deduction failed, node_id=node-id" in caplog.text + assert not stop_event.is_set() + layer.command_channel.send_command.assert_not_called() + + +def test_send_abort_command_is_noop_without_channel_or_after_abort() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + + layer._send_abort_command(reason="no channel") + + layer.command_channel = MagicMock() + layer._abort_sent = True + layer._send_abort_command(reason="already aborted") + + layer.command_channel.send_command.assert_not_called() + + +def test_quota_deduction_exceeded_aborts_workflow_immediately() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) 
node.graph_runtime_state = MagicMock() node.graph_runtime_state.stop_event = stop_event result_event = _build_succeeded_event() with patch( - "core.app.workflow.layers.llm_quota.deduct_llm_quota", + "core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True, side_effect=QuotaExceededError("No credits remaining"), ): @@ -152,19 +190,16 @@ def test_quota_deduction_exceeded_aborts_workflow_immediately() -> None: def test_quota_precheck_failure_aborts_workflow_immediately() -> None: - layer = LLMQuotaLayer() + layer = LLMQuotaLayer(tenant_id="tenant-id") stop_event = threading.Event() layer.command_channel = MagicMock() - node = MagicMock() - node.id = "llm-node-id" - node.node_type = BuiltinNodeTypes.LLM - node.model_instance, _ = _build_wrapped_model_instance() + node = _build_node(node_type=BuiltinNodeTypes.LLM) node.graph_runtime_state = MagicMock() node.graph_runtime_state.stop_event = stop_event with patch( - "core.app.workflow.layers.llm_quota.ensure_llm_quota_available", + "core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True, side_effect=QuotaExceededError("Model provider openai quota exceeded."), ): @@ -177,21 +212,140 @@ def test_quota_precheck_failure_aborts_workflow_immediately() -> None: assert abort_command.reason == "Model provider openai quota exceeded." 
-def test_quota_precheck_passes_without_abort() -> None: - layer = LLMQuotaLayer() +def test_quota_precheck_failure_blocks_current_node_run() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") stop_event = threading.Event() layer.command_channel = MagicMock() - node = MagicMock() - node.id = "llm-node-id" - node.node_type = BuiltinNodeTypes.LLM - node.model_instance, raw_model_instance = _build_wrapped_model_instance() + node = _RunnableQuotaNode(stop_event=stop_event) + + with patch( + "core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", + autospec=True, + side_effect=QuotaExceededError("Model provider openai quota exceeded."), + ): + layer.on_node_run_start(node) + + result = node._run() + assert not node.original_run_called + assert result.status == WorkflowNodeExecutionStatus.FAILED + assert result.error == "Model provider openai quota exceeded." + assert result.error_type == QuotaExceededError.__name__ + + +def test_missing_model_identity_blocks_current_node_run() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _RunnableQuotaNode(stop_event=stop_event, node_data=_build_node_data()) + + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + result = node._run() + assert not node.original_run_called + assert result.status == WorkflowNodeExecutionStatus.FAILED + assert result.error == "LLM quota check requires public node model identity before execution." 
+ assert result.error_type == "LLMQuotaIdentityError" + mock_check.assert_not_called() + + +def test_quota_precheck_passes_without_abort() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) node.graph_runtime_state = MagicMock() node.graph_runtime_state.stop_event = stop_event - with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available", autospec=True) as mock_check: + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: layer.on_node_run_start(node) assert not stop_event.is_set() - mock_check.assert_called_once_with(model_instance=raw_model_instance) + mock_check.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) layer.command_channel.send_command.assert_not_called() + + +def test_precheck_reads_model_identity_from_data_when_node_data_is_absent() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = SimpleNamespace( + id="node-id", + node_type=BuiltinNodeTypes.LLM, + data=_build_node_data(model=_build_public_model_identity(provider="anthropic", model_name="claude")), + ) + + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + mock_check.assert_called_once_with( + tenant_id="tenant-id", + provider="anthropic", + model="claude", + ) + + +def test_precheck_rejects_invalid_public_model_identity() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.node_data = _build_node_data(model=_build_public_model_identity(provider="", model_name="gpt-4o")) + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + + with 
patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + assert stop_event.is_set() + mock_check.assert_not_called() + layer.command_channel.send_command.assert_called_once() + + +def test_precheck_requires_public_node_model_config() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.node_data = _build_node_data() + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + assert stop_event.is_set() + mock_check.assert_not_called() + layer.command_channel.send_command.assert_called_once() + abort_command = layer.command_channel.send_command.call_args.args[0] + assert abort_command.command_type == CommandType.ABORT + assert abort_command.reason == "LLM quota check requires public node model identity before execution." 
+ + +def test_deduction_requires_public_event_model_identity() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + result_event = _build_succeeded_event() + result_event.node_run_result.inputs = {"question": "hello"} + + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: + layer.on_node_run_end(node=node, error=None, result_event=result_event) + + assert stop_event.is_set() + mock_deduct.assert_not_called() + layer.command_channel.send_command.assert_called_once() + abort_command = layer.command_channel.send_command.call_args.args[0] + assert abort_command.command_type == CommandType.ABORT + assert abort_command.reason == "LLM quota deduction requires model identity in the node result event." diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py index 9f3e3b00b9..c721c7b0eb 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py @@ -96,7 +96,7 @@ class MockNodeFactory(DifyNodeFactory): if node_type == BuiltinNodeTypes.CODE: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, @@ -106,7 +106,7 @@ class MockNodeFactory(DifyNodeFactory): elif node_type == BuiltinNodeTypes.HTTP_REQUEST: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, @@ -122,7 +122,7 @@ class 
MockNodeFactory(DifyNodeFactory): }: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, @@ -132,7 +132,7 @@ class MockNodeFactory(DifyNodeFactory): else: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py index f9819c47ec..e0eb4e7361 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py @@ -56,7 +56,7 @@ class MockNodeMixin: def __init__( self, node_id: str, - config: Any, + data: Any, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", @@ -98,7 +98,7 @@ class MockNodeMixin: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, **kwargs, diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py b/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py index 75bc6d05f7..6156f7b576 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py @@ -111,7 +111,7 @@ class StaticRepo(HumanInputFormRepository): def _build_runtime_state() -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -140,7 +140,7 @@ def _build_graph(runtime_state: 
GraphRuntimeState, repo: HumanInputFormRepositor start_config = {"id": "start", "data": StartNodeData(title="Start", variables=[]).model_dump()} start_node = StartNode( node_id=start_config["id"], - config=StartNodeData(title="Start", variables=[]), + data=StartNodeData(title="Start", variables=[]), graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) @@ -155,7 +155,7 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor human_a_config = {"id": "human_a", "data": human_data.model_dump()} human_a = HumanInputNode( node_id=human_a_config["id"], - config=human_data, + data=human_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, form_repository=repo, @@ -165,7 +165,7 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor human_b_config = {"id": "human_b", "data": human_data.model_dump()} human_b = HumanInputNode( node_id=human_b_config["id"], - config=human_data, + data=human_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, form_repository=repo, @@ -183,7 +183,7 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor end_config = {"id": "end", "data": end_data.model_dump()} end_node = EndNode( node_id=end_config["id"], - config=end_data, + data=end_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py b/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py index ae9dae0646..2603e29be6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py +++ b/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py @@ -1,41 +1,36 @@ import time import uuid -from unittest.mock import MagicMock from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom -from core.workflow.node_factory import DifyNodeFactory from core.workflow.system_variables import build_system_variables 
-from extensions.ext_database import db from graphon.enums import WorkflowNodeExecutionStatus -from graphon.graph import Graph from graphon.nodes.answer.answer_node import AnswerNode from graphon.nodes.answer.entities import AnswerNodeData from graphon.runtime import GraphRuntimeState, VariablePool from tests.workflow_test_utils import build_test_graph_init_params -def test_execute_answer(): +def _build_variable_pool() -> VariablePool: + return VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="aaa", files=[]), + user_inputs={}, + ) + + +def _build_answer_node(*, answer: str, variable_pool: VariablePool) -> AnswerNode: graph_config = { - "edges": [ - { - "id": "start-source-answer-target", - "source": "start", - "target": "answer", - }, - ], + "edges": [], "nodes": [ - {"data": {"type": "start", "title": "Start"}, "id": "start"}, { "data": { - "title": "123", + "title": "Answer", "type": "answer", - "answer": "Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.", + "answer": answer, }, "id": "answer", - }, + } ], } - init_params = build_test_graph_init_params( workflow_id="1", graph_config=graph_config, @@ -46,42 +41,31 @@ def test_execute_answer(): invoke_from=InvokeFrom.DEBUGGER, call_depth=0, ) - - # construct variable pool - variable_pool = VariablePool( - system_variables=build_system_variables(user_id="aaa", files=[]), - user_inputs={}, - environment_variables=[], - conversation_variables=[], + graph_runtime_state = GraphRuntimeState( + variable_pool=variable_pool, + start_at=time.perf_counter(), ) - variable_pool.add(["start", "weather"], "sunny") - variable_pool.add(["llm", "text"], "You are a helpful AI.") - - graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()) - - # create node factory - node_factory = DifyNodeFactory( - graph_init_params=init_params, - graph_runtime_state=graph_runtime_state, - ) - - graph = Graph.init(graph_config=graph_config, 
node_factory=node_factory, root_node_id="start") - - node = AnswerNode( + return AnswerNode( node_id=str(uuid.uuid4()), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, - config=AnswerNodeData( - title="123", + data=AnswerNodeData( + title="Answer", type="answer", - answer="Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.", + answer=answer, ), ) - # Mock db.session.close() - db.session.close = MagicMock() - # execute node +def test_execute_answer_renders_variable_selectors() -> None: + variable_pool = _build_variable_pool() + variable_pool.add(["start", "weather"], "sunny") + variable_pool.add(["llm", "text"], "You are a helpful AI.") + node = _build_answer_node( + answer="Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.", + variable_pool=variable_pool, + ) + result = node._run() assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED @@ -89,36 +73,11 @@ def test_execute_answer(): def test_execute_answer_renders_structured_output_object_as_json() -> None: - init_params = build_test_graph_init_params( - workflow_id="1", - graph_config={"nodes": [], "edges": []}, - tenant_id="1", - app_id="1", - user_id="1", - user_from=UserFrom.ACCOUNT, - invoke_from=InvokeFrom.DEBUGGER, - call_depth=0, - ) - - variable_pool = VariablePool( - system_variables=build_system_variables(user_id="aaa", files=[]), - user_inputs={}, - environment_variables=[], - conversation_variables=[], - ) + variable_pool = _build_variable_pool() variable_pool.add(["1777539038857", "structured_output"], {"type": "greeting"}) - - graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()) - - node = AnswerNode( - node_id=str(uuid.uuid4()), - graph_init_params=init_params, - graph_runtime_state=graph_runtime_state, - config=AnswerNodeData( - title="123", - type="answer", - answer="{{#1777539038857.structured_output#}}", - ), + node = _build_answer_node( + 
answer="{{#1777539038857.structured_output#}}", + variable_pool=variable_pool, ) result = node._run() @@ -128,35 +87,9 @@ def test_execute_answer_renders_structured_output_object_as_json() -> None: def test_execute_answer_falls_back_to_plain_selector_text_when_structured_output_missing() -> None: - init_params = build_test_graph_init_params( - workflow_id="1", - graph_config={"nodes": [], "edges": []}, - tenant_id="1", - app_id="1", - user_id="1", - user_from=UserFrom.ACCOUNT, - invoke_from=InvokeFrom.DEBUGGER, - call_depth=0, - ) - - variable_pool = VariablePool( - system_variables=build_system_variables(user_id="aaa", files=[]), - user_inputs={}, - environment_variables=[], - conversation_variables=[], - ) - - graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()) - - node = AnswerNode( - node_id=str(uuid.uuid4()), - graph_init_params=init_params, - graph_runtime_state=graph_runtime_state, - config=AnswerNodeData( - title="123", - type="answer", - answer="{{#1777539038857.structured_output#}}", - ), + node = _build_answer_node( + answer="{{#1777539038857.structured_output#}}", + variable_pool=_build_variable_pool(), ) result = node._run() diff --git a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py index d7ef781732..235d56e989 100644 --- a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GraphParams: call_depth = 0 -def test_datasource_node_delegates_to_manager_stream(mocker): +def 
test_datasource_node_delegates_to_manager_stream(mocker: MockerFixture): # prepare sys variables sys_vars = { "sys": { @@ -79,7 +81,7 @@ def test_datasource_node_delegates_to_manager_stream(mocker): node = DatasourceNode( node_id="n", - config=DatasourceNodeData( + data=DatasourceNodeData( type="datasource", version="1", title="Datasource", diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py index be7cc073db..796fc7719d 100644 --- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py +++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py @@ -29,7 +29,7 @@ HTTP_REQUEST_CONFIG = HttpRequestNodeConfig( def test_executor_with_json_body_and_number_variable(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -85,7 +85,7 @@ def test_executor_with_json_body_and_number_variable(): def test_executor_with_json_body_and_object_variable(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -143,7 +143,7 @@ def test_executor_with_json_body_and_object_variable(): def test_executor_with_json_body_and_nested_object_variable(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -201,7 +201,7 @@ def test_executor_with_json_body_and_nested_object_variable(): def test_extract_selectors_from_template_with_newline(): - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) variable_pool.add(("node_id", "custom_query"), 
"line1\nline2") node_data = HttpRequestNodeData( title="Test JSON Body with Nested Object Variable", @@ -230,7 +230,7 @@ def test_extract_selectors_from_template_with_newline(): def test_executor_with_form_data(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -320,7 +320,7 @@ def test_init_headers(): node_data=node_data, timeout=timeout, http_request_config=HTTP_REQUEST_CONFIG, - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), http_client=ssrf_proxy, file_manager=file_manager, ) @@ -357,7 +357,7 @@ def test_init_params(): node_data=node_data, timeout=timeout, http_request_config=HTTP_REQUEST_CONFIG, - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), http_client=ssrf_proxy, file_manager=file_manager, ) @@ -390,7 +390,7 @@ def test_init_params(): def test_empty_api_key_raises_error_bearer(): """Test that empty API key raises AuthorizationConfigError for bearer auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -417,7 +417,7 @@ def test_empty_api_key_raises_error_bearer(): def test_empty_api_key_raises_error_basic(): """Test that empty API key raises AuthorizationConfigError for basic auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -444,7 +444,7 @@ def test_empty_api_key_raises_error_basic(): def test_empty_api_key_raises_error_custom(): """Test 
that empty API key raises AuthorizationConfigError for custom auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -471,7 +471,7 @@ def test_empty_api_key_raises_error_custom(): def test_whitespace_only_api_key_raises_error(): """Test that whitespace-only API key raises AuthorizationConfigError.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -498,7 +498,7 @@ def test_whitespace_only_api_key_raises_error(): def test_valid_api_key_works(): """Test that valid API key works correctly for bearer auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -536,7 +536,7 @@ def test_executor_with_json_body_and_unquoted_uuid_variable(): # UUID that triggers the json_repair truncation bug test_uuid = "57eeeeb1-450b-482c-81b9-4be77e95dee2" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -583,7 +583,7 @@ def test_executor_with_json_body_and_unquoted_uuid_with_newlines(): """ test_uuid = "57eeeeb1-450b-482c-81b9-4be77e95dee2" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -624,7 +624,7 @@ def test_executor_with_json_body_and_unquoted_uuid_with_newlines(): def test_executor_with_json_body_preserves_numbers_and_strings(): """Test that numbers are preserved and string values are properly quoted.""" - variable_pool = VariablePool( + variable_pool 
= VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py index 2e89a2da3c..afde541beb 100644 --- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py @@ -110,12 +110,15 @@ def _build_http_node( call_depth=0, ) graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="user", files=[]), + user_inputs={}, + ), start_at=time.perf_counter(), ) return HttpRequestNode( node_id="http-node", - config=HttpRequestNodeData.model_validate(node_data), + data=HttpRequestNodeData.model_validate(node_data), graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, http_request_config=HTTP_REQUEST_CONFIG, diff --git a/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py b/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py index 0659984c76..715292b85c 100644 --- a/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py +++ b/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py @@ -149,7 +149,7 @@ def _build_human_input_node( ) return HumanInputNode( node_id=node_id, - config=typed_node_data, + data=typed_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, runtime=runtime, @@ -241,16 +241,16 @@ class TestUserAction: def test_user_action_length_boundaries(self): """Test user action id and title length boundaries.""" - action = UserAction(id="a" * 20, title="b" * 20) + action = UserAction(id="a" * 20, title="b" * 100) assert action.id == "a" * 20 - assert 
action.title == "b" * 20 + assert action.title == "b" * 100 @pytest.mark.parametrize( ("field_name", "value"), [ ("id", "a" * 21), - ("title", "b" * 21), + ("title", "b" * 101), ], ) def test_user_action_length_limits(self, field_name: str, value: str): @@ -427,7 +427,7 @@ class TestHumanInputNodeVariableResolution: """Tests for resolving variable-based defaults in HumanInputNode.""" def test_resolves_variable_defaults(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -504,7 +504,7 @@ class TestHumanInputNodeVariableResolution: assert params.resolved_default_values == expected_values def test_debugger_falls_back_to_recipient_token_when_webapp_disabled(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -565,7 +565,7 @@ class TestHumanInputNodeVariableResolution: assert not hasattr(pause_event.reason, "form_token") def test_webapp_runtime_keeps_form_visible_in_ui_when_webapp_delivery_is_enabled(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -631,7 +631,7 @@ class TestHumanInputNodeVariableResolution: assert params.display_in_ui is True def test_debugger_debug_mode_overrides_email_recipients(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user-123", app_id="app", @@ -748,7 +748,7 @@ class TestHumanInputNodeRenderedContent: """Tests for rendering submitted content.""" def test_replaces_outputs_placeholders_after_submission(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", diff --git 
a/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py b/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py index 4a9438b14f..741b104393 100644 --- a/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py +++ b/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py @@ -40,7 +40,7 @@ def _create_human_input_node( ) return HumanInputNode( node_id=config["id"], - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, form_repository=repo, @@ -51,7 +51,11 @@ def _create_human_input_node( def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name#}}") -> HumanInputNode: system_variables = default_system_variables() graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=system_variables, user_inputs={}, environment_variables=[]), + variable_pool=VariablePool.from_bootstrap( + system_variables=system_variables, + user_inputs={}, + environment_variables=[], + ), start_at=0.0, ) graph_init_params = GraphInitParams( @@ -114,7 +118,11 @@ def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name# def _build_timeout_node() -> HumanInputNode: system_variables = default_system_variables() graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=system_variables, user_inputs={}, environment_variables=[]), + variable_pool=VariablePool.from_bootstrap( + system_variables=system_variables, + user_inputs={}, + environment_variables=[], + ), start_at=0.0, ) graph_init_params = GraphInitParams( diff --git a/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py b/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py index 8ffce39cd6..18ed7a0b1d 100644 --- 
a/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py +++ b/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py @@ -32,7 +32,7 @@ class _MissingGraphBuilder: def _build_runtime_state() -> GraphRuntimeState: return GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables(), user_inputs={}), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables(), user_inputs={}), start_at=0.0, ) @@ -46,7 +46,7 @@ def _build_iteration_node( init_params = build_test_graph_init_params(graph_config=graph_config) return IterationNode( node_id="iteration-node", - config=IterationNodeData( + data=IterationNodeData( type="iteration", title="Iteration", iterator_selector=["start", "items"], diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py index f254fc3d09..0d760a2db7 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.rag.index_processor.constant.index_type import IndexTechniqueType @@ -40,7 +41,7 @@ def mock_graph_init_params(): @pytest.fixture def mock_graph_runtime_state(): """Create mock GraphRuntimeState.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id=str(uuid.uuid4()), files=[]), user_inputs={}, environment_variables=[], @@ -50,7 +51,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_index_processor(mocker): +def mock_index_processor(mocker: MockerFixture): """Create mock 
IndexProcessorProtocol.""" mock_processor = Mock(spec=IndexProcessorProtocol) mocker.patch( @@ -61,7 +62,7 @@ def mock_index_processor(mocker): @pytest.fixture -def mock_summary_index_service(mocker): +def mock_summary_index_service(mocker: MockerFixture): """Create mock SummaryIndexServiceProtocol.""" mock_service = Mock(spec=SummaryIndexServiceProtocol) mocker.patch( @@ -102,7 +103,7 @@ def _build_node( ) -> KnowledgeIndexNode: return KnowledgeIndexNode( node_id=node_id, - config=( + data=( node_data if isinstance(node_data, KnowledgeIndexNodeData) else KnowledgeIndexNodeData.model_validate(node_data) diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py index e923ee761b..3c821e75ba 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.workflow.nodes.knowledge_retrieval.entities import ( @@ -46,7 +47,7 @@ def mock_graph_init_params(): @pytest.fixture def mock_graph_runtime_state(): """Create mock GraphRuntimeState.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id=str(uuid.uuid4()), files=[]), user_inputs={}, environment_variables=[], @@ -56,7 +57,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_rag_retrieval(mocker): +def mock_rag_retrieval(mocker: MockerFixture): """Create mock RAGRetrievalProtocol.""" mock_retrieval = Mock(spec=RAGRetrievalProtocol) mock_retrieval.knowledge_retrieval.return_value = [] @@ -117,7 +118,7 @@ class TestKnowledgeRetrievalNode: # Act node = 
KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -146,7 +147,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -205,7 +206,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -249,7 +250,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -285,7 +286,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -320,7 +321,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -361,7 +362,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - 
config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -400,7 +401,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -481,7 +482,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -518,7 +519,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -573,7 +574,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -621,7 +622,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -682,7 +683,7 @@ class TestFetchDatasetRetriever: config = {"id": node_id, "data": node_data.model_dump()} node = KnowledgeRetrievalNode( 
node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py b/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py index 388654f279..20b94d5d50 100644 --- a/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py +++ b/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py @@ -16,10 +16,10 @@ class TestListOperatorNode: """Comprehensive tests for ListOperatorNode.""" @staticmethod - def _build_node(*, config, graph_init_params, graph_runtime_state): + def _build_node(*, data, graph_init_params, graph_runtime_state): return ListOperatorNode( node_id="test", - config=config if isinstance(config, ListOperatorNodeData) else ListOperatorNodeData.model_validate(config), + data=data if isinstance(data, ListOperatorNodeData) else ListOperatorNodeData.model_validate(data), graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) @@ -65,7 +65,7 @@ class TestListOperatorNode: def _create_node(config, mock_variable): mock_graph_runtime_state.variable_pool.get.return_value = mock_variable return self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -83,7 +83,7 @@ class TestListOperatorNode: } node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -127,7 +127,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -153,7 +153,7 @@ class TestListOperatorNode: 
mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -177,7 +177,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -201,7 +201,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -228,7 +228,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -255,7 +255,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -282,7 +282,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -312,7 +312,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -335,7 +335,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = None node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, 
graph_runtime_state=mock_graph_runtime_state, ) @@ -359,7 +359,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -384,7 +384,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -408,7 +408,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -432,7 +432,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -456,7 +456,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -483,7 +483,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py index c707cf28cd..fb50723402 100644 --- a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py @@ -15,7 +15,7 @@ from core.app.llm.model_access import ( ) from core.entities.provider_configuration import 
ProviderConfiguration, ProviderModelBundle from core.entities.provider_entities import CustomConfiguration, SystemConfiguration -from core.plugin.impl.model_runtime_factory import create_plugin_model_runtime +from core.plugin.impl.model_runtime_factory import create_plugin_model_assembly from core.prompt.entities.advanced_prompt_entities import MemoryConfig from core.workflow.system_variables import default_system_variables from graphon.entities import GraphInitParams @@ -187,7 +187,7 @@ def graph_init_params() -> GraphInitParams: @pytest.fixture def graph_runtime_state() -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -208,7 +208,7 @@ def llm_node( http_client = mock.MagicMock() node = LLMNode( node_id="1", - config=llm_node_data, + data=llm_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, credentials_provider=mock_credentials_provider, @@ -222,7 +222,7 @@ def llm_node( @pytest.fixture -def model_config(monkeypatch): +def model_config(monkeypatch: pytest.MonkeyPatch): from tests.integration_tests.model_runtime.__mock.plugin_model import MockModelClass def mock_model_providers(_self): @@ -241,9 +241,10 @@ def model_config(monkeypatch): ) # Create actual provider and model type instances - model_provider_factory = ModelProviderFactory(model_runtime=create_plugin_model_runtime(tenant_id="test")) + model_assembly = create_plugin_model_assembly(tenant_id="test") + model_provider_factory = model_assembly.model_provider_factory provider_instance = model_provider_factory.get_model_provider("openai") - model_type_instance = model_provider_factory.get_model_type_instance("openai", ModelType.LLM) + model_type_instance = model_assembly.create_model_type_instance(provider="openai", model_type=ModelType.LLM) # Create a ProviderModelBundle provider_model_bundle = ProviderModelBundle( @@ -1173,7 +1174,7 @@ def 
llm_node_for_multimodal(llm_node_data, graph_init_params, graph_runtime_stat http_client = mock.MagicMock() node = LLMNode( node_id="1", - config=llm_node_data, + data=llm_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, credentials_provider=mock_credentials_provider, @@ -1276,7 +1277,7 @@ class TestSaveMultimodalOutputAndConvertResultToMarkdown: mock_file_saver.save_binary_string.assert_not_called() mock_file_saver.save_remote_url.assert_not_called() - def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch): + def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch: pytest.MonkeyPatch): llm_node, mock_file_saver = llm_node_for_multimodal image_raw_data = b"PNG_DATA" diff --git a/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py b/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py index 892f6cc586..dd57dde1fe 100644 --- a/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py +++ b/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py @@ -28,7 +28,7 @@ def _build_template_transform_node( ) return TemplateTransformNode( node_id=node_id, - config=typed_node_data, + data=typed_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, **kwargs, diff --git a/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py b/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py index a846efbb43..c25ac7da0f 100644 --- a/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py @@ -39,7 +39,7 @@ def mock_graph_runtime_state(): def 
test_node_uses_default_max_output_length_when_not_overridden(graph_init_params, mock_graph_runtime_state): node = TemplateTransformNode( node_id="test_node", - config=TemplateTransformNodeData( + data=TemplateTransformNodeData( title="Template Transform", type="template-transform", variables=[], diff --git a/api/tests/unit_tests/core/workflow/nodes/test_base_node.py b/api/tests/unit_tests/core/workflow/nodes/test_base_node.py index 364408ead6..a05151f79b 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_base_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_base_node.py @@ -35,7 +35,10 @@ def _build_context(graph_config: Mapping[str, object]) -> tuple[GraphInitParams, invoke_from="debugger", ) runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="user", files=[]), + user_inputs={}, + ), start_at=0.0, ) return init_params, runtime_state @@ -62,7 +65,7 @@ def test_node_hydrates_data_during_initialization(): node = _SampleNode( node_id="node-1", - config=_build_node_data(), + data=_build_node_data(), graph_init_params=init_params, graph_runtime_state=runtime_state, ) @@ -82,13 +85,16 @@ def test_node_accepts_invoke_from_enum(): invoke_from=InvokeFrom.DEBUGGER, ) runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="user", files=[]), + user_inputs={}, + ), start_at=0.0, ) node = _SampleNode( node_id="node-1", - config=_build_node_data(), + data=_build_node_data(), graph_init_params=init_params, graph_runtime_state=runtime_state, ) @@ -140,7 +146,7 @@ def test_node_hydration_preserves_compatibility_extra_fields(): node = _SampleNode( node_id="node-1", - 
config=node_config["data"], + data=node_config["data"], graph_init_params=init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py b/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py index dd75b32593..4c67f3fb02 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py @@ -49,7 +49,7 @@ def document_extractor_node(graph_init_params): http_client = Mock() node = DocumentExtractorNode( node_id="test_node_id", - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=Mock(), http_client=http_client, @@ -186,12 +186,13 @@ def test_run_extract_text( monkeypatch.setattr("graphon.file.file_manager.download", mock_download) + dispatch_mock = None if mime_type == "application/pdf": - mock_pdf_extract = Mock(return_value=expected_text[0]) - monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_from_pdf", mock_pdf_extract) + dispatch_mock = Mock(return_value=expected_text[0]) + monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_file_extension", dispatch_mock) elif mime_type.startswith("application/vnd.openxmlformats"): - mock_docx_extract = Mock(return_value=expected_text[0]) - monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_from_docx", mock_docx_extract) + dispatch_mock = Mock(return_value=expected_text[0]) + monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_mime_type", dispatch_mock) result = document_extractor_node._run() @@ -200,6 +201,19 @@ def test_run_extract_text( assert result.outputs is not None assert result.outputs["text"] == ArrayStringSegment(value=expected_text) + if mime_type == "application/pdf": + dispatch_mock.assert_called_once_with( + file_content=file_content, + file_extension=extension, + 
unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + elif mime_type.startswith("application/vnd.openxmlformats"): + dispatch_mock.assert_called_once_with( + file_content=file_content, + mime_type=mime_type, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + if transfer_method == FileTransferMethod.REMOTE_URL: document_extractor_node._http_client.get.assert_called_once_with("https://example.com/file.txt") elif transfer_method == FileTransferMethod.LOCAL_FILE: @@ -439,24 +453,42 @@ def test_extract_text_from_file_routes_excel_inputs(document_extractor_node, ext file.extension = extension file.mime_type = mime_type - with ( - patch( - "graphon.nodes.document_extractor.node._download_file_content", - return_value=b"excel", - ), - patch( - "graphon.nodes.document_extractor.node._extract_text_from_excel", - return_value="excel text", - ) as mock_extract, + with patch( + "graphon.nodes.document_extractor.node._download_file_content", + return_value=b"excel", ): - result = _extract_text_from_file( - document_extractor_node.http_client, - file, - unstructured_api_config=document_extractor_node._unstructured_api_config, - ) + if extension: + with patch( + "graphon.nodes.document_extractor.node._extract_text_by_file_extension", + return_value="excel text", + ) as mock_extract: + result = _extract_text_from_file( + document_extractor_node.http_client, + file, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + mock_extract.assert_called_once_with( + file_content=b"excel", + file_extension=extension, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + else: + with patch( + "graphon.nodes.document_extractor.node._extract_text_by_mime_type", + return_value="excel text", + ) as mock_extract: + result = _extract_text_from_file( + document_extractor_node.http_client, + file, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + 
mock_extract.assert_called_once_with( + file_content=b"excel", + mime_type=mime_type, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) assert result == "excel text" - mock_extract.assert_called_once_with(b"excel") def test_extract_text_from_file_rejects_missing_extension_and_mime_type(document_extractor_node): diff --git a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py index aa9a1360b0..5965645c4f 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py @@ -29,7 +29,7 @@ def _build_if_else_node( node_id=str(uuid.uuid4()), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, - config=node_data if isinstance(node_data, IfElseNodeData) else IfElseNodeData.model_validate(node_data), + data=node_data if isinstance(node_data, IfElseNodeData) else IfElseNodeData.model_validate(node_data), ) @@ -48,7 +48,10 @@ def test_execute_if_else_result_true(): ) # construct variable pool - pool = VariablePool(system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}) + pool = VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="aaa", files=[]), + user_inputs={}, + ) pool.add(["start", "array_contains"], ["ab", "def"]) pool.add(["start", "array_not_contains"], ["ac", "def"]) pool.add(["start", "contains"], "cabcde") @@ -148,7 +151,7 @@ def test_execute_if_else_result_false(): ) # construct variable pool - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -305,7 +308,7 @@ def test_execute_if_else_boolean_conditions(condition: Condition): ) # construct variable pool with boolean values - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(files=[], user_id="aaa"), ) 
pool.add(["start", "bool_true"], True) @@ -359,7 +362,7 @@ def test_execute_if_else_boolean_false_conditions(): ) # construct variable pool with boolean values - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(files=[], user_id="aaa"), ) pool.add(["start", "bool_true"], True) @@ -424,7 +427,7 @@ def test_execute_if_else_boolean_cases_structure(): ) # construct variable pool with boolean values - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(files=[], user_id="aaa"), ) pool.add(["start", "bool_true"], True) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py index 465a4c0ff4..1b4cecc757 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py @@ -22,7 +22,7 @@ from graphon.variables import ArrayFileSegment def _build_list_operator_node(node_data: ListOperatorNodeData, graph_init_params) -> ListOperatorNode: return ListOperatorNode( node_id="test_node_id", - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=MagicMock(), ) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py b/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py index 5655f80737..f890f79511 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py @@ -31,7 +31,7 @@ def make_start_node(user_inputs, variables): return StartNode( node_id="start", - config=node_data, + data=node_data, graph_init_params=build_test_graph_init_params( workflow_id="wf", graph_config={}, @@ -260,7 +260,7 @@ def test_start_node_outputs_full_variable_pool_snapshot(): graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, 
start_at=time.perf_counter()) node = StartNode( node_id="start", - config=node_data, + data=node_data, graph_init_params=build_test_graph_init_params( workflow_id="wf", graph_config={}, diff --git a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py index 284af68319..4aa5803ac7 100644 --- a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py @@ -99,7 +99,7 @@ def tool_node(monkeypatch) -> ToolNode: call_depth=0, ) - variable_pool = VariablePool(system_variables=build_system_variables(user_id="user-id")) + variable_pool = VariablePool.from_bootstrap(system_variables=build_system_variables(user_id="user-id")) graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=0.0) config = graph_config["nodes"][0] @@ -110,7 +110,7 @@ def tool_node(monkeypatch) -> ToolNode: node = ToolNode( node_id="node-instance", - config=ToolNodeData.model_validate(config["data"]), + data=ToolNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, tool_file_manager_factory=tool_file_manager_factory, diff --git a/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py b/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py index e3b5e3b591..c5ac8d2ce2 100644 --- a/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py @@ -44,7 +44,7 @@ def test_trigger_event_node_run_populates_trigger_info_metadata() -> None: init_params, runtime_state = _build_context(graph_config={}) node = TriggerEventNode( node_id="node-1", - config=_build_node_data(), + data=_build_node_data(), graph_init_params=init_params, graph_runtime_state=runtime_state, ) diff --git 
a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py index 07d03bec05..fccb5ab1c3 100644 --- a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py +++ b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py @@ -52,7 +52,7 @@ def create_webhook_node( node = TriggerWebhookNode( node_id="webhook-node-1", - config=webhook_data, + data=webhook_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py index b839490d3c..c5ae542d8b 100644 --- a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py @@ -44,7 +44,7 @@ def create_webhook_node(webhook_data: WebhookData, variable_pool: VariablePool) ) node = TriggerWebhookNode( node_id="1", - config=webhook_data, + data=webhook_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/test_node_factory.py b/api/tests/unit_tests/core/workflow/test_node_factory.py index 1821f72e0c..d6159e84d4 100644 --- a/api/tests/unit_tests/core/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/workflow/test_node_factory.py @@ -1,3 +1,4 @@ +from collections.abc import Mapping from types import SimpleNamespace from unittest.mock import MagicMock, patch, sentinel @@ -11,19 +12,20 @@ from graphon.entities.base_node_data import BaseNodeData from graphon.enums import BuiltinNodeTypes, NodeType from graphon.nodes.code.entities import CodeLanguage from graphon.nodes.llm.entities import LLMNodeData +from graphon.nodes.llm.node import LLMNode from graphon.variables.segments import StringSegment -def _assert_typed_node_config(config, *, node_id: 
str, node_type: NodeType, version: str = "1") -> None: +def _assert_constructor_node_data(data, *, node_id: str, node_type: NodeType, version: str = "1") -> None: _ = node_id - if isinstance(config, BaseNodeData): - assert config.type == node_type - assert config.version == version + if isinstance(data, BaseNodeData): + assert data.type == node_type + assert data.version == version return - assert isinstance(config, dict) - assert config["type"] == node_type - assert config["version"] == version + assert isinstance(data, Mapping) + assert data["type"] == node_type + assert data.get("version", "1") == version def _node_constructor(*, return_value): @@ -88,7 +90,7 @@ class TestFetchMemory: assert result is None - def test_returns_none_when_conversation_does_not_exist(self, monkeypatch): + def test_returns_none_when_conversation_does_not_exist(self, monkeypatch: pytest.MonkeyPatch): class FakeSelect: def where(self, *_args): return self @@ -119,7 +121,7 @@ class TestFetchMemory: assert result is None - def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch): + def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch: pytest.MonkeyPatch): conversation = sentinel.conversation memory = sentinel.memory @@ -189,7 +191,7 @@ class TestDifyGraphInitContext: class TestDefaultWorkflowCodeExecutor: - def test_execute_delegates_to_code_executor(self, monkeypatch): + def test_execute_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): executor = node_factory.DefaultWorkflowCodeExecutor() execute_workflow_code_template = MagicMock(return_value={"answer": "ok"}) monkeypatch.setattr( @@ -219,7 +221,7 @@ class TestDefaultWorkflowCodeExecutor: class TestCodeExecutorJinja2TemplateRenderer: - def test_render_template_delegates_to_code_executor(self, monkeypatch): + def test_render_template_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): renderer = 
workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() execute_workflow_code_template = MagicMock(return_value={"result": "Hello workflow"}) monkeypatch.setattr( @@ -237,7 +239,7 @@ class TestCodeExecutorJinja2TemplateRenderer: inputs={"name": "workflow"}, ) - def test_render_template_wraps_code_execution_errors(self, monkeypatch): + def test_render_template_wraps_code_execution_errors(self, monkeypatch: pytest.MonkeyPatch): renderer = workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() monkeypatch.setattr( workflow_template_rendering.CodeExecutor, @@ -434,7 +436,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: missing"): factory.create_node({"id": "node-id", "data": {"type": "missing"}}) - def test_rejects_missing_class_mapping(self, monkeypatch, factory): + def test_rejects_missing_class_mapping(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ -444,7 +446,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_rejects_missing_latest_class(self, monkeypatch, factory): + def test_rejects_missing_latest_class(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ -454,7 +456,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No latest version class found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_uses_version_specific_class_when_available(self, monkeypatch, factory): + def test_uses_version_specific_class_when_available(self, monkeypatch: pytest.MonkeyPatch, factory): matched_node = sentinel.matched_node latest_node_class = _node_constructor(return_value=sentinel.latest_node) matched_node_class = 
_node_constructor(return_value=matched_node) @@ -470,12 +472,14 @@ class TestDifyNodeFactoryCreateNode: matched_node_class.assert_called_once() kwargs = matched_node_class.call_args.kwargs assert kwargs["node_id"] == "node-id" - _assert_typed_node_config(kwargs["config"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") + _assert_constructor_node_data(kwargs["data"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") assert kwargs["graph_init_params"] is sentinel.graph_init_params assert kwargs["graph_runtime_state"] is factory.graph_runtime_state latest_node_class.assert_not_called() - def test_falls_back_to_latest_class_when_version_specific_mapping_is_missing(self, monkeypatch, factory): + def test_falls_back_to_latest_class_when_version_specific_mapping_is_missing( + self, monkeypatch: pytest.MonkeyPatch, factory + ): latest_node = sentinel.latest_node latest_node_class = _node_constructor(return_value=latest_node) monkeypatch.setattr( @@ -490,7 +494,7 @@ class TestDifyNodeFactoryCreateNode: latest_node_class.assert_called_once() kwargs = latest_node_class.call_args.kwargs assert kwargs["node_id"] == "node-id" - _assert_typed_node_config(kwargs["config"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") + _assert_constructor_node_data(kwargs["data"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") assert kwargs["graph_init_params"] is sentinel.graph_init_params assert kwargs["graph_runtime_state"] is factory.graph_runtime_state @@ -507,7 +511,7 @@ class TestDifyNodeFactoryCreateNode: (BuiltinNodeTypes.DOCUMENT_EXTRACTOR, "DocumentExtractorNode"), ], ) - def test_creates_specialized_nodes(self, monkeypatch, factory, node_type, constructor_name): + def test_creates_specialized_nodes(self, monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name): created_node = object() constructor = _node_constructor(return_value=created_node) constructor._mock_name = constructor_name @@ -528,7 
+532,7 @@ class TestDifyNodeFactoryCreateNode: assert result is created_node kwargs = constructor.call_args.kwargs assert kwargs["node_id"] == "node-id" - _assert_typed_node_config(kwargs["config"], node_id="node-id", node_type=node_type) + _assert_constructor_node_data(kwargs["data"], node_id="node-id", node_type=node_type) assert kwargs["graph_init_params"] is sentinel.graph_init_params assert kwargs["graph_runtime_state"] is factory.graph_runtime_state @@ -597,9 +601,12 @@ class TestDifyNodeFactoryCreateNode: prepared_llm.assert_called_once_with(sentinel.model_instance) assert kwargs["model_instance"] is wrapped_model_instance - def test_create_node_passes_alias_preserving_llm_config_to_constructor(self, monkeypatch, factory): + def test_create_node_passes_alias_preserving_llm_data_to_constructor(self, monkeypatch, factory): created_node = object() constructor = _node_constructor(return_value=created_node) + constructor.validate_node_data.side_effect = lambda node_data: LLMNodeData.model_validate( + node_data.model_dump(mode="python") if isinstance(node_data, BaseNodeData) else node_data + ) monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=constructor)) monkeypatch.setattr(factory, "_build_llm_compatible_node_init_kwargs", MagicMock(return_value={})) @@ -625,10 +632,56 @@ class TestDifyNodeFactoryCreateNode: factory.create_node(node_config) - config = constructor.call_args.kwargs["config"] - assert isinstance(config, dict) - assert config["structured_output_enabled"] is True - assert "structured_output_switch_on" not in config + data = constructor.call_args.kwargs["data"] + assert isinstance(data, Mapping) + assert data["structured_output_enabled"] is True + assert "structured_output_switch_on" not in data + assert LLMNodeData.model_validate(data).structured_output_enabled is True + + def test_create_node_preserves_structured_output_switch_after_graphon_constructor(self, monkeypatch, factory): + factory.graph_init_params = 
SimpleNamespace( + workflow_id="workflow-id", + graph_config={}, + run_context={}, + call_depth=0, + ) + monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=LLMNode)) + monkeypatch.setattr( + factory, + "_build_llm_compatible_node_init_kwargs", + MagicMock( + return_value={ + "model_instance": sentinel.model_instance, + "llm_file_saver": sentinel.llm_file_saver, + "prompt_message_serializer": sentinel.prompt_message_serializer, + } + ), + ) + + node_config = { + "id": "llm-node-id", + "data": { + "type": BuiltinNodeTypes.LLM, + "title": "LLM", + "model": {"provider": "provider", "name": "model", "mode": "chat", "completion_params": {}}, + "prompt_template": [{"role": "system", "text": "x"}], + "context": {"enabled": False, "variable_selector": []}, + "vision": {"enabled": False}, + "structured_output_enabled": True, + "structured_output": { + "schema": { + "type": "object", + "properties": {"type": {"type": "string"}}, + "required": ["type"], + } + }, + }, + } + + node = factory.create_node(node_config) + + assert node.node_data.structured_output_switch_on is True + assert node.node_data.structured_output_enabled is True @pytest.mark.parametrize( ("node_type", "constructor_name", "expected_extra_kwargs"), @@ -665,7 +718,7 @@ class TestDifyNodeFactoryCreateNode: ) def test_creates_model_backed_nodes( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name, @@ -707,7 +760,7 @@ class TestDifyNodeFactoryCreateNode: constructor_kwargs = constructor.call_args.kwargs assert constructor_kwargs["node_id"] == "node-id" - _assert_typed_node_config(constructor_kwargs["config"], node_id="node-id", node_type=node_type) + _assert_constructor_node_data(constructor_kwargs["data"], node_id="node-id", node_type=node_type) assert constructor_kwargs["graph_init_params"] is sentinel.graph_init_params assert constructor_kwargs["graph_runtime_state"] is factory.graph_runtime_state assert constructor_kwargs["credentials_provider"] 
is sentinel.credentials_provider @@ -726,7 +779,7 @@ class TestDifyNodeFactoryModelInstance: factory._llm_model_factory = sentinel.model_factory return factory - def test_delegates_to_fetch_model_config(self, monkeypatch, factory): + def test_delegates_to_fetch_model_config(self, monkeypatch: pytest.MonkeyPatch, factory): node_data_model = SimpleNamespace( provider="provider", name="model", @@ -755,7 +808,7 @@ class TestDifyNodeFactoryModelInstance: model_factory=sentinel.model_factory, ) - def test_propagates_fetch_model_config_errors(self, monkeypatch, factory): + def test_propagates_fetch_model_config_errors(self, monkeypatch: pytest.MonkeyPatch, factory): fetch_model_config = MagicMock(side_effect=ValueError("broken model config")) monkeypatch.setattr(node_factory, "fetch_model_config", fetch_model_config) @@ -780,7 +833,7 @@ class TestDifyNodeFactoryMemory: assert result is None factory.graph_runtime_state.variable_pool.get.assert_not_called() - def test_uses_string_segment_conversation_id(self, monkeypatch, factory): + def test_uses_string_segment_conversation_id(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = StringSegment(value="conversation-id") fetch_memory = MagicMock(return_value=sentinel.memory) @@ -800,7 +853,7 @@ class TestDifyNodeFactoryMemory: model_instance=sentinel.model_instance, ) - def test_ignores_non_string_segment_conversation_ids(self, monkeypatch, factory): + def test_ignores_non_string_segment_conversation_ids(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = sentinel.segment fetch_memory = MagicMock(return_value=sentinel.memory) diff --git a/api/tests/unit_tests/core/workflow/test_variable_pool.py b/api/tests/unit_tests/core/workflow/test_variable_pool.py index 9dab38ed8e..0017cd8d3f 100644 --- 
a/api/tests/unit_tests/core/workflow/test_variable_pool.py +++ b/api/tests/unit_tests/core/workflow/test_variable_pool.py @@ -109,8 +109,8 @@ class TestVariablePool: assert pool.get([ENVIRONMENT_VARIABLE_NODE_ID, "env_var_1"]) is not None assert pool.get([CONVERSATION_VARIABLE_NODE_ID, "conv_var_1"]) is not None - def test_constructor_loads_legacy_bootstrap_kwargs(self): - pool = VariablePool( + def test_from_bootstrap_loads_legacy_bootstrap_kwargs(self): + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="test_user_id"), environment_variables=[StringVariable(name="env_var", value="env-value")], conversation_variables=[StringVariable(name="conv_var", value="conv-value")], diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry.py b/api/tests/unit_tests/core/workflow/test_workflow_entry.py index 041c5cc612..661882f013 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry.py @@ -19,7 +19,7 @@ from graphon.variables.variables import StringVariable @pytest.fixture(autouse=True) -def _mock_ssrf_head(monkeypatch): +def _mock_ssrf_head(monkeypatch: pytest.MonkeyPatch): """Avoid any real network requests during tests. 
factories.file_factory.remote.get_remote_file_info() uses ssrf_proxy.head @@ -55,7 +55,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_to_variable_pool_with_system_variables(self): """Test mapping system variables from user inputs to variable pool.""" # Initialize variable pool with system variables - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="test_user_id", app_id="test_app_id", @@ -128,7 +128,7 @@ class TestWorkflowEntry: return NodeConfigDictAdapter.validate_python(node_config) workflow = StubWorkflow() - variable_pool = VariablePool(system_variables=default_system_variables(), user_inputs={}) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables(), user_inputs={}) expected_limits = CodeNodeLimits( max_string_length=dify_config.CODE_MAX_STRING_LENGTH, max_number=dify_config.CODE_MAX_NUMBER, @@ -157,7 +157,7 @@ class TestWorkflowEntry: """Test mapping environment variables from user inputs to variable pool.""" # Initialize variable pool with environment variables env_var = StringVariable(name="API_KEY", value="existing_key") - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), environment_variables=[env_var], user_inputs={}, @@ -198,7 +198,7 @@ class TestWorkflowEntry: """Test mapping conversation variables from user inputs to variable pool.""" # Initialize variable pool with conversation variables conv_var = StringVariable(name="last_message", value="Hello") - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), conversation_variables=[conv_var], user_inputs={}, @@ -239,7 +239,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_to_variable_pool_with_regular_variables(self): """Test mapping regular node variables from user inputs to variable pool.""" # Initialize empty variable pool - 
variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -281,7 +281,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_with_file_handling(self): """Test mapping file inputs from user inputs to variable pool.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -340,7 +340,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_missing_variable_error(self): """Test that mapping raises error when required variable is missing.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -366,7 +366,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_with_alternative_key_format(self): """Test mapping with alternative key format (without node prefix).""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -396,7 +396,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_with_complex_selectors(self): """Test mapping with complex node variable keys.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -432,7 +432,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_invalid_node_variable(self): """Test that mapping handles invalid node variable format.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -463,7 +463,7 @@ class TestWorkflowEntry: env_var = StringVariable(name="API_KEY", value="existing_key") conv_var = StringVariable(name="session_id", value="session123") - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( 
user_id="test_user", app_id="test_app", diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py index 270d0bf90d..a57cdd1337 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py @@ -7,7 +7,6 @@ import pytest from core.app.apps.exc import GenerateTaskStoppedError from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom -from core.model_manager import ModelInstance from core.workflow import workflow_entry from core.workflow.system_variables import default_system_variables from graphon.entities.base_node_data import BaseNodeData @@ -16,10 +15,12 @@ from graphon.errors import WorkflowNodeRunFailedError from graphon.file import File, FileTransferMethod, FileType from graphon.graph import Graph from graphon.graph_events import GraphRunFailedEvent -from graphon.model_runtime.entities.llm_entities import LLMUsage +from graphon.model_runtime.entities.llm_entities import LLMMode, LLMUsage from graphon.node_events import NodeRunResult from graphon.nodes import BuiltinNodeTypes from graphon.nodes.base.node import Node +from graphon.nodes.llm.entities import ContextConfig, LLMNodeData, ModelConfig +from graphon.nodes.question_classifier.entities import QuestionClassifierNodeData from graphon.runtime import ChildGraphNotFoundError, VariablePool from graphon.variables.variables import StringVariable from tests.workflow_test_utils import build_test_graph_init_params, build_test_variable_pool @@ -29,9 +30,30 @@ def _build_typed_node_config(node_type: NodeType): return {"id": "node-id", "data": BaseNodeData(type=node_type)} -def _build_wrapped_model_instance() -> tuple[SimpleNamespace, ModelInstance]: - raw_model_instance = ModelInstance.__new__(ModelInstance) - return SimpleNamespace(_model_instance=raw_model_instance), raw_model_instance +def _build_model_config(*, provider: str = 
"openai", model_name: str = "gpt-4o") -> ModelConfig: + return ModelConfig(provider=provider, name=model_name, mode=LLMMode.CHAT) + + +def _build_llm_node_data(*, provider: str = "openai", model_name: str = "gpt-4o") -> LLMNodeData: + return LLMNodeData( + type=BuiltinNodeTypes.LLM, + title="Child Model", + model=_build_model_config(provider=provider, model_name=model_name), + prompt_template=[], + context=ContextConfig(enabled=False), + ) + + +def _build_question_classifier_node_data( + *, provider: str = "openai", model_name: str = "gpt-4o" +) -> QuestionClassifierNodeData: + return QuestionClassifierNodeData( + type=BuiltinNodeTypes.QUESTION_CLASSIFIER, + title="Child Model", + query_variable_selector=["sys", "query"], + model=_build_model_config(provider=provider, model_name=model_name), + classes=[], + ) class _FakeModelNodeMixin: @@ -40,22 +62,26 @@ class _FakeModelNodeMixin: return "1" def post_init(self) -> None: - self.model_instance, self.raw_model_instance = _build_wrapped_model_instance() + self.model_instance = SimpleNamespace(provider="stale-provider", model_name="stale-model") self.usage_snapshot = LLMUsage.empty_usage() self.usage_snapshot.total_tokens = 1 def _run(self) -> NodeRunResult: return NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, + inputs={ + "model_provider": self.node_data.model.provider, + "model_name": self.node_data.model.name, + }, llm_usage=self.usage_snapshot, ) -class _FakeLLMNode(_FakeModelNodeMixin, Node[BaseNodeData]): +class _FakeLLMNode(_FakeModelNodeMixin, Node[LLMNodeData]): node_type = BuiltinNodeTypes.LLM -class _FakeQuestionClassifierNode(_FakeModelNodeMixin, Node[BaseNodeData]): +class _FakeQuestionClassifierNode(_FakeModelNodeMixin, Node[QuestionClassifierNodeData]): node_type = BuiltinNodeTypes.QUESTION_CLASSIFIER @@ -75,7 +101,7 @@ class TestWorkflowChildEngineBuilder: assert result is expected def test_build_child_engine_raises_when_root_node_is_missing(self): - builder = 
workflow_entry._WorkflowChildEngineBuilder() + builder = workflow_entry._WorkflowChildEngineBuilder(tenant_id="tenant-id") graph_init_params = SimpleNamespace(graph_config={"nodes": []}) parent_graph_runtime_state = SimpleNamespace( execution_context=sentinel.execution_context, @@ -92,7 +118,7 @@ class TestWorkflowChildEngineBuilder: ) def test_build_child_engine_constructs_graph_engine_with_quota_layer_only(self): - builder = workflow_entry._WorkflowChildEngineBuilder() + builder = workflow_entry._WorkflowChildEngineBuilder(tenant_id="tenant-id") graph_init_params = SimpleNamespace(graph_config={"nodes": [{"id": "root"}]}) parent_graph_runtime_state = SimpleNamespace( execution_context=sentinel.execution_context, @@ -114,7 +140,7 @@ class TestWorkflowChildEngineBuilder: patch.object(workflow_entry, "GraphEngine", return_value=child_engine) as graph_engine_cls, patch.object(workflow_entry, "GraphEngineConfig", return_value=sentinel.graph_engine_config), patch.object(workflow_entry, "InMemoryChannel", return_value=sentinel.command_channel), - patch.object(workflow_entry, "LLMQuotaLayer", return_value=sentinel.llm_quota_layer), + patch.object(workflow_entry, "LLMQuotaLayer", return_value=sentinel.llm_quota_layer) as llm_quota_layer_cls, ): result = builder.build_child_engine( workflow_id="workflow-id", @@ -147,11 +173,12 @@ class TestWorkflowChildEngineBuilder: config=sentinel.graph_engine_config, child_engine_builder=builder, ) + llm_quota_layer_cls.assert_called_once_with(tenant_id="tenant-id") assert child_engine.layer.call_args_list == [((sentinel.llm_quota_layer,), {})] @pytest.mark.parametrize("node_cls", [_FakeLLMNode, _FakeQuestionClassifierNode]) def test_build_child_engine_runs_llm_quota_layer_for_child_model_nodes(self, node_cls): - builder = workflow_entry._WorkflowChildEngineBuilder() + builder = workflow_entry._WorkflowChildEngineBuilder(tenant_id="tenant-id") graph_init_params = build_test_graph_init_params( graph_config={"nodes": [{"id": "root"}], 
"edges": []}, ) @@ -163,12 +190,10 @@ class TestWorkflowChildEngineBuilder: def build_graph(*, graph_config, node_factory, root_node_id): _ = graph_config + node_data = _build_llm_node_data() if node_cls is _FakeLLMNode else _build_question_classifier_node_data() node = node_cls( node_id=root_node_id, - config=BaseNodeData( - type=node_cls.node_type, - title="Child Model", - ), + data=node_data, graph_init_params=node_factory.graph_init_params, graph_runtime_state=node_factory.graph_runtime_state, ) @@ -191,8 +216,8 @@ class TestWorkflowChildEngineBuilder: ), ), patch.object(workflow_entry.Graph, "init", side_effect=build_graph), - patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available") as ensure_quota, - patch("core.app.workflow.layers.llm_quota.deduct_llm_quota") as deduct_quota, + patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model") as ensure_quota, + patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model") as deduct_quota, ): child_engine = builder.build_child_engine( workflow_id="workflow-id", @@ -203,10 +228,15 @@ class TestWorkflowChildEngineBuilder: list(child_engine.run()) node = created_node["node"] - ensure_quota.assert_called_once_with(model_instance=node.raw_model_instance) + ensure_quota.assert_called_once_with( + tenant_id="tenant-id", + provider=node.node_data.model.provider, + model=node.node_data.model.name, + ) deduct_quota.assert_called_once_with( - tenant_id="tenant", - model_instance=node.raw_model_instance, + tenant_id="tenant-id", + provider=node.node_data.model.provider, + model=node.node_data.model.name, usage=node.usage_snapshot, ) @@ -252,7 +282,7 @@ class TestWorkflowEntryInit: "ExecutionLimitsLayer", return_value=execution_limits_layer, ) as execution_limits_layer_cls, - patch.object(workflow_entry, "LLMQuotaLayer", return_value=llm_quota_layer), + patch.object(workflow_entry, "LLMQuotaLayer", return_value=llm_quota_layer) as llm_quota_layer_cls, patch.object(workflow_entry, 
"ObservabilityLayer", return_value=observability_layer), ): entry = workflow_entry.WorkflowEntry( @@ -291,6 +321,7 @@ class TestWorkflowEntryInit: max_steps=workflow_entry.dify_config.WORKFLOW_MAX_EXECUTION_STEPS, max_time=workflow_entry.dify_config.WORKFLOW_MAX_EXECUTION_TIME, ) + llm_quota_layer_cls.assert_called_once_with(tenant_id="tenant-id") assert graph_engine.layer.call_args_list == [ ((debug_layer,), {}), ((execution_limits_layer,), {}), @@ -334,7 +365,7 @@ class TestWorkflowEntrySingleStepRun: def extract_variable_selector_to_variable_mapping(**_kwargs): return {} - variable_pool = VariablePool(system_variables=default_system_variables(), user_inputs={}) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables(), user_inputs={}) variable_loader = MagicMock() variable_loader.load_variables.return_value = [ StringVariable( @@ -603,7 +634,7 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_rejects_missing_node_class(self, monkeypatch): + def test_run_free_node_rejects_missing_node_class(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( workflow_entry, "resolve_workflow_node_class", @@ -619,7 +650,9 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented(self, monkeypatch): + def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented( + self, monkeypatch: pytest.MonkeyPatch + ): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): @@ -707,7 +740,7 @@ class TestWorkflowEntryHelpers: tenant_id="tenant-id", ) - def test_run_free_node_wraps_execution_failures(self, monkeypatch): + def test_run_free_node_wraps_execution_failures(self, monkeypatch: pytest.MonkeyPatch): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): diff --git 
a/api/tests/unit_tests/events/test_update_provider_when_message_created.py b/api/tests/unit_tests/events/test_update_provider_when_message_created.py new file mode 100644 index 0000000000..9cb8ca7854 --- /dev/null +++ b/api/tests/unit_tests/events/test_update_provider_when_message_created.py @@ -0,0 +1,130 @@ +from types import SimpleNamespace +from unittest.mock import patch +from uuid import uuid4 + +from sqlalchemy import create_engine, select + +from core.app.entities.app_invoke_entities import ChatAppGenerateEntity +from core.entities.provider_entities import ProviderQuotaType, QuotaUnit +from events.event_handlers import update_provider_when_message_created +from models import TenantCreditPool +from models.provider import ProviderType + + +def test_message_created_trial_credit_accounting_does_not_raise_when_balance_is_insufficient() -> None: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + tenant_id = str(uuid4()) + pool_id = str(uuid4()) + with engine.begin() as connection: + connection.execute( + TenantCreditPool.__table__.insert(), + { + "id": pool_id, + "tenant_id": tenant_id, + "pool_type": ProviderQuotaType.TRIAL, + "quota_limit": 10, + "quota_used": 9, + }, + ) + + system_configuration = SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=10, + ) + ], + ) + application_generate_entity = ChatAppGenerateEntity.model_construct( + app_config=SimpleNamespace(tenant_id=tenant_id), + model_conf=SimpleNamespace( + provider="openai", + model="gpt-4o", + provider_model_bundle=SimpleNamespace( + configuration=SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=system_configuration, + ) + ), + ), + ) + message = SimpleNamespace(message_tokens=2, answer_tokens=1) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + 
patch.object(update_provider_when_message_created, "_execute_provider_updates"), + ): + update_provider_when_message_created.handle( + sender=message, + application_generate_entity=application_generate_entity, + ) + + with engine.connect() as connection: + quota_used = connection.scalar(select(TenantCreditPool.quota_used).where(TenantCreditPool.id == pool_id)) + + assert quota_used == 10 + + +def test_message_created_paid_credit_accounting_uses_paid_pool() -> None: + tenant_id = str(uuid4()) + system_configuration = SimpleNamespace( + current_quota_type=ProviderQuotaType.PAID, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.PAID, + quota_unit=QuotaUnit.TOKENS, + quota_limit=10, + ) + ], + ) + application_generate_entity = ChatAppGenerateEntity.model_construct( + app_config=SimpleNamespace(tenant_id=tenant_id), + model_conf=SimpleNamespace( + provider="openai", + model="gpt-4o", + provider_model_bundle=SimpleNamespace( + configuration=SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=system_configuration, + ) + ), + ), + ) + message = SimpleNamespace(message_tokens=2, answer_tokens=1) + + with ( + patch.object(update_provider_when_message_created, "_deduct_credit_pool_quota_capped") as mock_deduct, + patch.object(update_provider_when_message_created, "_execute_provider_updates"), + ): + update_provider_when_message_created.handle( + sender=message, + application_generate_entity=application_generate_entity, + ) + + mock_deduct.assert_called_once_with( + tenant_id=tenant_id, + credits_required=3, + pool_type="paid", + ) + + +def test_capped_credit_pool_accounting_skips_exhaustion_warning_when_full_amount_is_deducted(caplog) -> None: + with patch( + "services.credit_pool_service.CreditPoolService.deduct_credits_capped", + return_value=3, + ) as mock_deduct: + update_provider_when_message_created._deduct_credit_pool_quota_capped( + tenant_id="tenant-id", + credits_required=3, + pool_type="trial", + ) + + 
mock_deduct.assert_called_once_with( + tenant_id="tenant-id", + credits_required=3, + pool_type="trial", + ) + assert "Credit pool exhausted during message-created accounting" not in caplog.text diff --git a/api/tests/unit_tests/extensions/test_ext_request_logging.py b/api/tests/unit_tests/extensions/test_ext_request_logging.py index dcb457c806..03479686bb 100644 --- a/api/tests/unit_tests/extensions/test_ext_request_logging.py +++ b/api/tests/unit_tests/extensions/test_ext_request_logging.py @@ -71,7 +71,7 @@ def enable_request_logging(monkeypatch: pytest.MonkeyPatch): class TestRequestLoggingExtension: def test_receiver_should_not_be_invoked_if_configuration_is_disabled( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_request_receiver, mock_response_receiver, ): @@ -266,7 +266,9 @@ class TestResponseUnmodified: class TestRequestFinishedInfoAccessLine: - def test_info_access_log_includes_method_path_status_duration_trace_id(self, monkeypatch, caplog): + def test_info_access_log_includes_method_path_status_duration_trace_id( + self, monkeypatch: pytest.MonkeyPatch, caplog + ): """Ensure INFO access line contains expected fields with computed duration and trace id.""" app = _get_test_app() # Push a real request context so flask.request and g are available @@ -299,7 +301,7 @@ class TestRequestFinishedInfoAccessLine: assert "123.456" in msg # rounded to 3 decimals assert "trace-xyz" in msg - def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch, caplog): + def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch: pytest.MonkeyPatch, caplog): app = _get_test_app() with app.test_request_context("/bar", method="POST"): # No g.__request_started_ts set -> duration should be '-' diff --git a/api/tests/unit_tests/extensions/test_pubsub_channel.py b/api/tests/unit_tests/extensions/test_pubsub_channel.py index 926c406ad4..24bbf55cb3 100644 --- a/api/tests/unit_tests/extensions/test_pubsub_channel.py +++ 
b/api/tests/unit_tests/extensions/test_pubsub_channel.py @@ -1,10 +1,12 @@ +import pytest + from configs import dify_config from extensions import ext_redis from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel from libs.broadcast_channel.redis.sharded_channel import ShardedRedisBroadcastChannel -def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): +def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) @@ -13,7 +15,7 @@ def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): assert isinstance(channel, RedisBroadcastChannel) -def test_get_pubsub_broadcast_channel_sharded(monkeypatch): +def test_get_pubsub_broadcast_channel_sharded(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) diff --git a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py index 8bef01c1ed..7c7f20374e 100644 --- a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py +++ b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py @@ -673,7 +673,7 @@ class TestRedisShardedSubscription: """Test cases for the _RedisShardedSubscription class.""" @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture @@ -889,7 +889,9 @@ class TestRedisShardedSubscription: assert not sharded_subscription._queue.empty() assert sharded_subscription._queue.get_nowait() == b"test sharded payload" - def 
test_get_message_uses_target_node_for_cluster_client(self, mock_pubsub: MagicMock, monkeypatch): + def test_get_message_uses_target_node_for_cluster_client( + self, mock_pubsub: MagicMock, monkeypatch: pytest.MonkeyPatch + ): """Test that cluster clients use target_node for sharded messages.""" class DummyRedisCluster: @@ -1177,7 +1179,7 @@ class TestRedisSubscriptionCommon: return request.param @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture diff --git a/api/tests/unit_tests/libs/test_archive_storage.py b/api/tests/unit_tests/libs/test_archive_storage.py index de3c9c4737..4363c23571 100644 --- a/api/tests/unit_tests/libs/test_archive_storage.py +++ b/api/tests/unit_tests/libs/test_archive_storage.py @@ -34,7 +34,7 @@ def _client_error(code: str) -> ClientError: return ClientError({"Error": {"Code": code}}, "Operation") -def _mock_client(monkeypatch): +def _mock_client(monkeypatch: pytest.MonkeyPatch): client = MagicMock() client.head_bucket.return_value = None # Configure put_object to return a proper ETag that matches the MD5 hash @@ -56,19 +56,19 @@ def _mock_client(monkeypatch): return client, boto_client -def test_init_disabled(monkeypatch): +def test_init_disabled(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENABLED=False) with pytest.raises(ArchiveStorageNotConfiguredError, match="not enabled"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_missing_config(monkeypatch): +def test_init_missing_config(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENDPOINT=None) with pytest.raises(ArchiveStorageNotConfiguredError, match="incomplete"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_not_found(monkeypatch): +def test_init_bucket_not_found(monkeypatch: pytest.MonkeyPatch): 
_configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("404") @@ -77,7 +77,7 @@ def test_init_bucket_not_found(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_access_denied(monkeypatch): +def test_init_bucket_access_denied(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("403") @@ -86,7 +86,7 @@ def test_init_bucket_access_denied(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_other_error(monkeypatch): +def test_init_bucket_other_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("500") @@ -95,7 +95,7 @@ def test_init_bucket_other_error(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_sets_client(monkeypatch): +def test_init_sets_client(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, boto_client = _mock_client(monkeypatch) @@ -113,7 +113,7 @@ def test_init_sets_client(monkeypatch): assert storage.bucket == BUCKET_NAME -def test_put_object_returns_checksum(monkeypatch): +def test_put_object_returns_checksum(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -132,7 +132,7 @@ def test_put_object_returns_checksum(monkeypatch): assert checksum == expected_md5 -def test_put_object_raises_on_error(monkeypatch): +def test_put_object_raises_on_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -142,7 +142,7 @@ def test_put_object_raises_on_error(monkeypatch): storage.put_object("key", b"data") -def test_get_object_returns_bytes(monkeypatch): +def test_get_object_returns_bytes(monkeypatch: pytest.MonkeyPatch): 
_configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ -153,7 +153,7 @@ def test_get_object_returns_bytes(monkeypatch): assert storage.get_object("key") == b"payload" -def test_get_object_missing(monkeypatch): +def test_get_object_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -163,7 +163,7 @@ def test_get_object_missing(monkeypatch): storage.get_object("missing") -def test_get_object_stream(monkeypatch): +def test_get_object_stream(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ -174,7 +174,7 @@ def test_get_object_stream(monkeypatch): assert list(storage.get_object_stream("key")) == [b"a", b"b"] -def test_get_object_stream_missing(monkeypatch): +def test_get_object_stream_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -184,7 +184,7 @@ def test_get_object_stream_missing(monkeypatch): list(storage.get_object_stream("missing")) -def test_object_exists(monkeypatch): +def test_object_exists(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -194,7 +194,7 @@ def test_object_exists(monkeypatch): assert storage.object_exists("missing") is False -def test_delete_object_error(monkeypatch): +def test_delete_object_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.delete_object.side_effect = _client_error("500") @@ -204,7 +204,7 @@ def test_delete_object_error(monkeypatch): storage.delete_object("key") -def test_list_objects(monkeypatch): +def test_list_objects(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = 
_mock_client(monkeypatch) paginator = MagicMock() @@ -219,7 +219,7 @@ def test_list_objects(monkeypatch): paginator.paginate.assert_called_once_with(Bucket="archive-bucket", Prefix="prefix") -def test_list_objects_error(monkeypatch): +def test_list_objects_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) paginator = MagicMock() @@ -231,7 +231,7 @@ def test_list_objects_error(monkeypatch): storage.list_objects("prefix") -def test_generate_presigned_url(monkeypatch): +def test_generate_presigned_url(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.return_value = "http://signed-url" @@ -247,7 +247,7 @@ def test_generate_presigned_url(monkeypatch): assert url == "http://signed-url" -def test_generate_presigned_url_error(monkeypatch): +def test_generate_presigned_url_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.side_effect = _client_error("500") diff --git a/api/tests/unit_tests/libs/test_pandas.py b/api/tests/unit_tests/libs/test_pandas.py index 21c2f0781d..a4739dbbc2 100644 --- a/api/tests/unit_tests/libs/test_pandas.py +++ b/api/tests/unit_tests/libs/test_pandas.py @@ -1,7 +1,8 @@ import pandas as pd +import pytest -def test_pandas_csv(tmp_path, monkeypatch): +def test_pandas_csv(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data) @@ -16,7 +17,7 @@ def test_pandas_csv(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx(tmp_path, monkeypatch): +def test_pandas_xlsx(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data) @@ -31,7 +32,7 @@ def 
test_pandas_xlsx(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch): +def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data1 = {"col1": [1, 2, 3, 4, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data1) diff --git a/api/tests/unit_tests/libs/test_rate_limiter.py b/api/tests/unit_tests/libs/test_rate_limiter.py index 9d44b07b5e..5052033db8 100644 --- a/api/tests/unit_tests/libs/test_rate_limiter.py +++ b/api/tests/unit_tests/libs/test_rate_limiter.py @@ -1,5 +1,7 @@ from unittest.mock import MagicMock +import pytest + from libs import helper as helper_module @@ -31,7 +33,7 @@ class _FakeRedis: return True -def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): +def test_rate_limiter_counts_attempts_within_same_second(monkeypatch: pytest.MonkeyPatch): fake_redis = _FakeRedis() monkeypatch.setattr(helper_module.time, "time", lambda: 1000) @@ -48,7 +50,7 @@ def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): assert limiter.is_rate_limited("203.0.113.10") is True -def test_rate_limiter_uses_injected_redis(monkeypatch): +def test_rate_limiter_uses_injected_redis(monkeypatch: pytest.MonkeyPatch): redis_client = MagicMock() redis_client.zcard.return_value = 1 monkeypatch.setattr(helper_module.time, "time", lambda: 1000) diff --git a/api/tests/unit_tests/libs/test_token.py b/api/tests/unit_tests/libs/test_token.py index 6a65b5faa0..734568d37b 100644 --- a/api/tests/unit_tests/libs/test_token.py +++ b/api/tests/unit_tests/libs/test_token.py @@ -1,5 +1,6 @@ from unittest.mock import MagicMock +import pytest from werkzeug.wrappers import Response from constants import COOKIE_NAME_ACCESS_TOKEN, COOKIE_NAME_WEBAPP_ACCESS_TOKEN @@ -30,7 +31,7 @@ def test_extract_access_token(): assert extract_webapp_access_token(request) == expected_webapp # pyright: ignore[reportArgumentType] -def 
test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): +def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", "", raising=False) @@ -38,7 +39,7 @@ def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): assert token._real_cookie_name("csrf_token") == "__Host-csrf_token" -def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): +def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) @@ -46,7 +47,7 @@ def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): assert token._real_cookie_name("csrf_token") == "csrf_token" -def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch): +def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) diff --git a/api/tests/unit_tests/services/plugin/conftest.py b/api/tests/unit_tests/services/plugin/conftest.py index 80c6077b0c..9dc4fa0390 100644 --- a/api/tests/unit_tests/services/plugin/conftest.py +++ b/api/tests/unit_tests/services/plugin/conftest.py @@ -21,7 +21,7 @@ def make_features( @pytest.fixture 
-def mock_installer(monkeypatch): +def mock_installer(monkeypatch: pytest.MonkeyPatch): """Patch PluginInstaller at the service import site.""" mock = MagicMock() monkeypatch.setattr("services.plugin.plugin_service.PluginInstaller", lambda: mock) diff --git a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py index 1a2d062208..287391c24c 100644 --- a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py +++ b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py @@ -2,12 +2,13 @@ from types import SimpleNamespace from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from services.rag_pipeline.rag_pipeline_task_proxy import RagPipelineTaskProxy @pytest.fixture -def proxy(mocker): +def proxy(mocker: MockerFixture): """Create a RagPipelineTaskProxy with mocked dependencies.""" mocker.patch("services.rag_pipeline.rag_pipeline_task_proxy.TenantIsolatedTaskQueue") entity = Mock() diff --git a/api/tests/unit_tests/services/recommend_app/test_category_order.py b/api/tests/unit_tests/services/recommend_app/test_category_order.py new file mode 100644 index 0000000000..3b94021f26 --- /dev/null +++ b/api/tests/unit_tests/services/recommend_app/test_category_order.py @@ -0,0 +1,26 @@ +import json +from unittest.mock import patch + +from services.recommend_app.category_order import get_explore_app_category_order, order_categories + + +@patch("services.recommend_app.category_order.redis_client.get") +def test_get_explore_app_category_order_returns_redis_list(mock_get): + mock_get.return_value = json.dumps(["C", "A", "B"]).encode() + + assert get_explore_app_category_order("en-US") == ["C", "A", "B"] + mock_get.assert_called_once_with("explore:apps:category_order:en-US") + + +@patch("services.recommend_app.category_order.redis_client.get") +def 
test_order_categories_uses_redis_order_as_source_of_truth(mock_get): + mock_get.return_value = json.dumps(["C", "A", "B"]).encode() + + assert order_categories({"A", "B", "C", "D"}, "en-US") == ["C", "A", "B"] + + +@patch("services.recommend_app.category_order.redis_client.get") +def test_order_categories_falls_back_to_sorted_categories_without_redis_order(mock_get): + mock_get.return_value = None + + assert order_categories({"B", "A", "C"}, "en-US") == ["A", "B", "C"] diff --git a/api/tests/unit_tests/services/test_app_generate_service.py b/api/tests/unit_tests/services/test_app_generate_service.py index d3f9c5dd9f..216c5d9db6 100644 --- a/api/tests/unit_tests/services/test_app_generate_service.py +++ b/api/tests/unit_tests/services/test_app_generate_service.py @@ -20,6 +20,7 @@ from contextlib import contextmanager from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import services.app_generate_service as ags_module from core.app.entities.app_invoke_entities import InvokeFrom @@ -96,7 +97,7 @@ def _noop_rate_limit_context(rate_limit, request_id): class TestBuildStreamingTaskOnSubscribe: """Tests for AppGenerateService._build_streaming_task_on_subscribe.""" - def test_streams_mode_starts_immediately(self, monkeypatch): + def test_streams_mode_starts_immediately(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") called = [] cb = AppGenerateService._build_streaming_task_on_subscribe(lambda: called.append(1)) @@ -106,7 +107,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] # not called again - def test_pubsub_mode_starts_on_subscribe(self, monkeypatch): + def test_pubsub_mode_starts_on_subscribe(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) # large to prevent timer called = [] @@ -118,7 +119,7 
@@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def test_sharded_mode_starts_on_subscribe(self, monkeypatch): + def test_sharded_mode_starts_on_subscribe(self, monkeypatch: pytest.MonkeyPatch): """sharded is treated like pubsub (i.e. not 'streams').""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) @@ -128,7 +129,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def test_pubsub_fallback_timer_fires(self, monkeypatch): + def test_pubsub_fallback_timer_fires(self, monkeypatch: pytest.MonkeyPatch): """When nobody subscribes fast enough the fallback timer fires.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 50) # 50 ms @@ -137,7 +138,7 @@ class TestBuildStreamingTaskOnSubscribe: time.sleep(0.2) # give the timer time to fire assert called == [1] - def test_exception_in_start_task_returns_false(self, monkeypatch): + def test_exception_in_start_task_returns_false(self, monkeypatch: pytest.MonkeyPatch): """When start_task raises, _try_start returns False and next call retries.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") call_count = 0 @@ -154,7 +155,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert call_count == 2 - def test_concurrent_subscribe_only_starts_once(self, monkeypatch): + def test_concurrent_subscribe_only_starts_once(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) call_count = 0 @@ -176,31 +177,31 @@ class TestBuildStreamingTaskOnSubscribe: # _get_max_active_requests # --------------------------------------------------------------------------- class TestGetMaxActiveRequests: - def test_both_zero_returns_zero(self, 
monkeypatch): + def test_both_zero_returns_zero(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 0 - def test_app_limit_only(self, monkeypatch): + def test_app_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_config_limit_only(self, monkeypatch): + def test_config_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 10) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 10 - def test_both_non_zero_returns_min(self, monkeypatch): + def test_both_non_zero_returns_min(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 20) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_default_active_requests_used_when_app_has_none(self, monkeypatch): + def test_default_active_requests_used_when_app_has_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 15) app = _make_app(AppMode.CHAT, max_active_requests=0) @@ -214,7 +215,7 @@ class TestGenerate: """Tests for AppGenerateService.generate covering each 
mode.""" @pytest.fixture(autouse=True) - def _common(self, mocker, monkeypatch): + def _common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) # Prevent AppExecutionParams.new from touching real models via isinstance @@ -224,7 +225,7 @@ class TestGenerate: ) # -- COMPLETION --------------------------------------------------------- - def test_completion_mode(self, mocker): + def test_completion_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate", return_value={"result": "ok"}, @@ -244,7 +245,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via mode ------------------------------------------------ - def test_agent_chat_mode(self, mocker): + def test_agent_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent"}, @@ -264,7 +265,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via is_agent flag (non-AGENT_CHAT mode) ----------------- - def test_agent_via_is_agent_flag(self, mocker): + def test_agent_via_is_agent_flag(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent-via-flag"}, @@ -285,7 +286,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- CHAT --------------------------------------------------------------- - def test_chat_mode(self, mocker): + def test_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.ChatAppGenerator.generate", return_value={"result": "chat"}, @@ -306,7 +307,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- ADVANCED_CHAT blocking --------------------------------------------- - def test_advanced_chat_blocking(self, 
mocker): + def test_advanced_chat_blocking(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) @@ -333,7 +334,7 @@ class TestGenerate: retrieve_spy.assert_not_called() # -- ADVANCED_CHAT streaming -------------------------------------------- - def test_advanced_chat_streaming(self, mocker, monkeypatch): + def test_advanced_chat_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -365,7 +366,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- WORKFLOW blocking -------------------------------------------------- - def test_workflow_blocking(self, mocker): + def test_workflow_blocking(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -390,7 +391,7 @@ class TestGenerate: assert call_kwargs["pause_state_config"].state_owner_user_id == "owner-id" # -- WORKFLOW streaming ------------------------------------------------- - def test_workflow_streaming(self, mocker, monkeypatch): + def test_workflow_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -422,7 +423,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- Invalid mode ------------------------------------------------------- - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app("invalid-mode", is_agent=False) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate( @@ -439,14 +440,14 @@ class TestGenerate: # --------------------------------------------------------------------------- class TestGenerateBilling: 
@pytest.fixture(autouse=True) - def _common(self, mocker, monkeypatch): + def _common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) mocker.patch( "services.app_generate_service.rate_limit_context", _noop_rate_limit_context, ) - def test_billing_enabled_consumes_quota(self, mocker, monkeypatch): + def test_billing_enabled_consumes_quota(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() reserve_mock = mocker.patch( @@ -472,7 +473,9 @@ class TestGenerateBilling: reserve_mock.assert_called_once_with(QuotaType.WORKFLOW, "tenant-id") quota_charge.commit.assert_called_once() - def test_billing_quota_exceeded_raises_rate_limit_error(self, mocker, monkeypatch): + def test_billing_quota_exceeded_raises_rate_limit_error( + self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch + ): from services.errors.app import QuotaExceededError from services.errors.llm import InvokeRateLimitError @@ -491,7 +494,7 @@ class TestGenerateBilling: streaming=False, ) - def test_exception_refunds_quota_and_exits_rate_limit(self, mocker, monkeypatch): + def test_exception_refunds_quota_and_exits_rate_limit(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() mocker.patch( @@ -517,7 +520,9 @@ class TestGenerateBilling: ) quota_charge.refund.assert_called_once() - def test_rate_limit_exit_called_in_finally_for_blocking(self, mocker, monkeypatch): + def test_rate_limit_exit_called_in_finally_for_blocking( + self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch + ): """For non-streaming (blocking) calls, rate_limit.exit should be called in finally.""" monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) @@ -552,7 +557,7 @@ class TestGenerateBilling: # _get_workflow # 
--------------------------------------------------------------------------- class TestGetWorkflow: - def test_debugger_fetches_draft(self, mocker): + def test_debugger_fetches_draft(self, mocker: MockerFixture): draft_wf = _make_workflow() ws = MagicMock() ws.get_draft_workflow.return_value = draft_wf @@ -562,7 +567,7 @@ class TestGetWorkflow: assert result is draft_wf ws.get_draft_workflow.assert_called_once() - def test_debugger_raises_when_no_draft(self, mocker): + def test_debugger_raises_when_no_draft(self, mocker: MockerFixture): ws = MagicMock() ws.get_draft_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -570,7 +575,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not initialized"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.DEBUGGER) - def test_non_debugger_fetches_published(self, mocker): + def test_non_debugger_fetches_published(self, mocker: MockerFixture): pub_wf = _make_workflow() ws = MagicMock() ws.get_published_workflow.return_value = pub_wf @@ -580,7 +585,7 @@ class TestGetWorkflow: assert result is pub_wf ws.get_published_workflow.assert_called_once() - def test_non_debugger_raises_when_no_published(self, mocker): + def test_non_debugger_raises_when_no_published(self, mocker: MockerFixture): ws = MagicMock() ws.get_published_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -588,7 +593,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not published"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API) - def test_specific_workflow_id_valid_uuid(self, mocker): + def test_specific_workflow_id_valid_uuid(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) specific_wf = _make_workflow(workflow_id=valid_uuid) ws = MagicMock() @@ -601,7 +606,7 @@ class TestGetWorkflow: assert result is specific_wf 
ws.get_published_workflow_by_id.assert_called_once() - def test_specific_workflow_id_invalid_uuid(self, mocker): + def test_specific_workflow_id_invalid_uuid(self, mocker: MockerFixture): ws = MagicMock() mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -610,7 +615,7 @@ class TestGetWorkflow: _make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API, workflow_id="not-a-uuid" ) - def test_specific_workflow_id_not_found(self, mocker): + def test_specific_workflow_id_not_found(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) ws = MagicMock() ws.get_published_workflow_by_id.return_value = None @@ -626,7 +631,7 @@ class TestGetWorkflow: # generate_single_iteration # --------------------------------------------------------------------------- class TestGenerateSingleIteration: - def test_advanced_chat_mode(self, mocker): + def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -644,7 +649,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "iteration"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -662,7 +667,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "wf-iteration"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.CHAT) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate_single_iteration(app_model=app, user=_make_user(), node_id="n1", args={}) @@ -672,7 +677,7 @@ class TestGenerateSingleIteration: # generate_single_loop # --------------------------------------------------------------------------- class 
TestGenerateSingleLoop: - def test_advanced_chat_mode(self, mocker): + def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -690,7 +695,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "loop"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -708,7 +713,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "wf-loop"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.COMPLETION) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate_single_loop(app_model=app, user=_make_user(), node_id="n1", args=MagicMock()) @@ -718,7 +723,7 @@ class TestGenerateSingleLoop: # generate_more_like_this # --------------------------------------------------------------------------- class TestGenerateMoreLikeThis: - def test_delegates_to_completion_generator(self, mocker): + def test_delegates_to_completion_generator(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate_more_like_this", return_value={"result": "similar"}, @@ -739,7 +744,7 @@ class TestGenerateMoreLikeThis: # get_response_generator # --------------------------------------------------------------------------- class TestGetResponseGenerator: - def test_non_ended_workflow_run(self, mocker): + def test_non_ended_workflow_run(self, mocker: MockerFixture): app = _make_app(AppMode.ADVANCED_CHAT) workflow_run = MagicMock() workflow_run.id = "run-1" @@ -756,7 +761,7 @@ class TestGetResponseGenerator: result = AppGenerateService.get_response_generator(app_model=app, 
workflow_run=workflow_run) gen_instance.retrieve_events.assert_called_once() - def test_ended_workflow_run_still_returns_generator(self, mocker): + def test_ended_workflow_run_still_returns_generator(self, mocker: MockerFixture): """Even when the run is ended, the current code still returns a generator (TODO branch).""" app = _make_app(AppMode.WORKFLOW) workflow_run = MagicMock() diff --git a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py index 30aa359b45..4293be8f72 100644 --- a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py +++ b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py @@ -89,7 +89,7 @@ class _FakeStreams: @pytest.fixture -def _patch_get_channel_streams(monkeypatch): +def _patch_get_channel_streams(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.streams_channel import StreamsBroadcastChannel fake = _FakeStreams() @@ -108,7 +108,7 @@ def _patch_get_channel_streams(monkeypatch): @pytest.fixture -def _patch_get_channel_pubsub(monkeypatch): +def _patch_get_channel_pubsub(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel store: dict[str, deque[bytes]] = defaultdict(deque) @@ -163,7 +163,7 @@ def test_streams_full_flow_prepublish_and_replay(): @pytest.mark.usefixtures("_patch_get_channel_pubsub") -def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch): +def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch: pytest.MonkeyPatch): # Speed up any potential timer if it accidentally triggers monkeypatch.setattr("services.app_generate_service.SSE_TASK_START_FALLBACK_MS", 50) diff --git a/api/tests/unit_tests/services/test_credit_pool_service.py b/api/tests/unit_tests/services/test_credit_pool_service.py new file mode 100644 index 0000000000..e77ef894e7 --- /dev/null +++ 
b/api/tests/unit_tests/services/test_credit_pool_service.py @@ -0,0 +1,158 @@ +from types import SimpleNamespace +from unittest.mock import patch +from uuid import uuid4 + +import pytest +from sqlalchemy import create_engine, select +from sqlalchemy.engine import Engine + +from core.errors.error import QuotaExceededError +from models import TenantCreditPool +from models.enums import ProviderQuotaType +from services.credit_pool_service import CreditPoolService + + +def _create_engine_with_pool(*, quota_limit: int, quota_used: int) -> tuple[Engine, str, str]: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + tenant_id = str(uuid4()) + pool_id = str(uuid4()) + with engine.begin() as connection: + connection.execute( + TenantCreditPool.__table__.insert(), + { + "id": pool_id, + "tenant_id": tenant_id, + "pool_type": ProviderQuotaType.TRIAL, + "quota_limit": quota_limit, + "quota_used": quota_used, + }, + ) + return engine, tenant_id, pool_id + + +def _get_quota_used(*, engine: Engine, pool_id: str) -> int | None: + with engine.connect() as connection: + return connection.scalar(select(TenantCreditPool.quota_used).where(TenantCreditPool.id == pool_id)) + + +def test_check_and_deduct_credits_deducts_exact_amount_when_sufficient() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=3) + + assert deducted_credits == 3 + assert _get_quota_used(engine=engine, pool_id=pool_id) == 5 + + +def test_check_and_deduct_credits_returns_zero_for_non_positive_request() -> None: + assert CreditPoolService.check_and_deduct_credits(tenant_id=str(uuid4()), credits_required=0) == 0 + + +def test_check_and_deduct_credits_raises_when_pool_is_missing() -> None: + engine = create_engine("sqlite:///:memory:") + 
TenantCreditPool.__table__.create(engine) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Credit pool not found"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=str(uuid4()), credits_required=1) + + +def test_check_and_deduct_credits_raises_when_pool_is_empty() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=10) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="No credits remaining"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 10 + + +def test_check_and_deduct_credits_raises_without_partial_deduction_when_insufficient() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=9) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Insufficient credits remaining"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=3) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 9 + + +def test_check_and_deduct_credits_wraps_unexpected_deduction_errors() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(CreditPoolService, "_get_locked_pool", side_effect=RuntimeError("database unavailable")), + pytest.raises(QuotaExceededError, match="Failed to deduct credits"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 2 + + +def test_deduct_credits_capped_returns_zero_for_non_positive_request() -> None: + assert 
CreditPoolService.deduct_credits_capped(tenant_id=str(uuid4()), credits_required=0) == 0 + + +def test_deduct_credits_capped_returns_zero_when_pool_is_missing() -> None: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.deduct_credits_capped(tenant_id=str(uuid4()), credits_required=1) + + assert deducted_credits == 0 + + +def test_deduct_credits_capped_returns_zero_when_pool_is_empty() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=10) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=1) + + assert deducted_credits == 0 + assert _get_quota_used(engine=engine, pool_id=pool_id) == 10 + + +def test_deduct_credits_capped_deducts_only_remaining_balance_when_insufficient() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=9) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=3) + + assert deducted_credits == 1 + assert _get_quota_used(engine=engine, pool_id=pool_id) == 10 + + +def test_deduct_credits_capped_wraps_unexpected_deduction_errors() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(CreditPoolService, "_get_locked_pool", side_effect=RuntimeError("database unavailable")), + pytest.raises(QuotaExceededError, match="Failed to deduct credits"), + ): + CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 2 + + +def 
test_deduct_credits_capped_reraises_quota_exceeded_errors() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(CreditPoolService, "_get_locked_pool", side_effect=QuotaExceededError("quota unavailable")), + pytest.raises(QuotaExceededError, match="quota unavailable"), + ): + CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 2 diff --git a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py index 9a513c3fe6..f5879d973d 100644 --- a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py +++ b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py @@ -22,7 +22,7 @@ class FakeLock: @pytest.fixture -def fake_current_user(monkeypatch): +def fake_current_user(monkeypatch: pytest.MonkeyPatch): user = create_autospec(Account, instance=True) user.id = "user-1" user.current_tenant_id = "tenant-1" @@ -31,7 +31,7 @@ def fake_current_user(monkeypatch): @pytest.fixture -def fake_features(monkeypatch): +def fake_features(monkeypatch: pytest.MonkeyPatch): """Features.billing.enabled == False to skip quota logic.""" features = types.SimpleNamespace( billing=types.SimpleNamespace(enabled=False, subscription=types.SimpleNamespace(plan="ENTERPRISE")), @@ -45,7 +45,7 @@ def fake_features(monkeypatch): @pytest.fixture -def fake_lock(monkeypatch): +def fake_lock(monkeypatch: pytest.MonkeyPatch): """Patch redis_client.lock to always raise LockNotOwnedError on enter.""" def _fake_lock(name, timeout=None, *args, **kwargs): @@ -61,7 +61,7 @@ def fake_lock(monkeypatch): def test_save_document_with_dataset_id_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_features, fake_lock, @@ -118,7 +118,7 @@ 
def test_save_document_with_dataset_id_ignores_lock_not_owned( def test_add_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): @@ -161,7 +161,7 @@ def test_add_segment_ignores_lock_not_owned( def test_multi_create_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): diff --git a/api/tests/unit_tests/services/test_human_input_service.py b/api/tests/unit_tests/services/test_human_input_service.py index 55af564821..9fc818f789 100644 --- a/api/tests/unit_tests/services/test_human_input_service.py +++ b/api/tests/unit_tests/services/test_human_input_service.py @@ -3,6 +3,7 @@ from datetime import datetime, timedelta from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import services.human_input_service as human_input_service_module from core.repositories.human_input_repository import ( @@ -177,7 +178,9 @@ def test_get_form_definition_by_token_for_console_uses_repository(sample_form_re assert form.get_definition() == console_record.definition -def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_calls_repository_and_enqueue( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -204,7 +207,9 @@ def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, m enqueue_spy.assert_called_once_with(sample_form_record.workflow_run_id) -def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_skips_enqueue_for_delivery_test( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = 
MagicMock(spec=HumanInputFormSubmissionRepository) test_record = dataclasses.replace( @@ -227,7 +232,9 @@ def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record enqueue_spy.assert_not_called() -def test_submit_form_by_token_passes_submission_user_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_passes_submission_user_id( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -314,7 +321,7 @@ def test_form_submitted_error_init(): assert error.code == 412 -def test_human_input_service_init_with_engine(mocker): +def test_human_input_service_init_with_engine(mocker: MockerFixture): engine = MagicMock(spec=human_input_service_module.Engine) sessionmaker_mock = mocker.patch("services.human_input_service.sessionmaker") @@ -371,7 +378,7 @@ def test_submit_form_by_token_delivery_not_enabled(mock_session_factory): service.submit_form_by_token(RecipientType.STANDALONE_WEB_APP, "token", "action", {}) -def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker: MockerFixture): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record diff --git a/api/tests/unit_tests/services/test_message_service.py b/api/tests/unit_tests/services/test_message_service.py index 7adc15d63e..51f8b3ef5b 100644 --- a/api/tests/unit_tests/services/test_message_service.py +++ b/api/tests/unit_tests/services/test_message_service.py @@ -906,7 +906,7 @@ class TestMessageServiceSuggestedQuestions: ): """Test successful suggested questions generation in basic Chat mode.""" # Arrange - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = 
factory.create_app_mock(mode=AppMode.CHAT) user = factory.create_end_user_mock() message = factory.create_message_mock() mock_get_message.return_value = message @@ -953,7 +953,7 @@ class TestMessageServiceSuggestedQuestions: """Test suggested question generation uses frontend configured model and prompt.""" from core.app.entities.app_invoke_entities import InvokeFrom - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() @@ -1024,7 +1024,7 @@ class TestMessageServiceSuggestedQuestions: factory, ): """Test invalid frontend configured model falls back to tenant default model.""" - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() diff --git a/api/tests/unit_tests/services/test_model_load_balancing_service.py b/api/tests/unit_tests/services/test_model_load_balancing_service.py index 3119af40a2..beecf73caa 100644 --- a/api/tests/unit_tests/services/test_model_load_balancing_service.py +++ b/api/tests/unit_tests/services/test_model_load_balancing_service.py @@ -104,7 +104,7 @@ def test_enable_disable_model_load_balancing_should_call_provider_configuration_ service.provider_manager.get_configurations.return_value = {"openai": provider_configuration} # Act - getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) # Assert getattr(provider_configuration, expected_provider_method).assert_called_once_with( @@ -125,7 +125,7 @@ def test_enable_disable_model_load_balancing_should_raise_value_error_when_provi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - getattr(service, method_name)("tenant-1", "openai", 
"gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_raise_value_error_when_provider_missing( @@ -136,7 +136,7 @@ def test_get_load_balancing_configs_should_raise_value_error_when_provider_missi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_for_custom_provider( @@ -177,7 +177,7 @@ def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_fo "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, ) # Assert @@ -238,7 +238,7 @@ def test_get_load_balancing_configs_should_reorder_existing_inherit_and_tolerate "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, config_from="predefined-model", ) @@ -259,7 +259,7 @@ def test_get_load_balancing_config_should_raise_value_error_when_provider_missin # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") def test_get_load_balancing_config_should_return_none_when_config_not_found( @@ -272,7 +272,7 @@ def test_get_load_balancing_config_should_return_none_when_config_not_found( mock_db.session.scalar.return_value = None # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result is None @@ -292,7 +292,7 @@ def 
test_get_load_balancing_config_should_return_obfuscated_payload_when_config_ mock_db.session.scalar.return_value = config # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result == { @@ -335,7 +335,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_provider_mi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [], "custom-model", ) @@ -354,7 +354,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_configs_is_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], "invalid-configs"), "custom-model", ) @@ -375,7 +375,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_config_item "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], ["bad-item"]), "custom-model", ) @@ -397,7 +397,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credential_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -418,7 +418,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"enabled": True}], "custom-model", ) @@ -428,7 +428,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "cfg-without-enabled"}], "custom-model", ) @@ -450,7 +450,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_existing_co "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-2", "name": "invalid", "enabled": True}], 
"custom-model", ) @@ -472,7 +472,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-1", "name": "new", "enabled": True, "credentials": "bad"}], "custom-model", ) @@ -482,7 +482,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new-config", "enabled": True, "credentials": "bad"}], "custom-model", ) @@ -519,7 +519,7 @@ def test_update_load_balancing_configs_should_update_existing_create_new_and_del "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [ {"id": "cfg-1", "name": "updated-name", "enabled": False, "credentials": {"api_key": "plain"}}, {"name": "new-config", "enabled": True, "credentials": {"api_key": "plain"}}, @@ -553,7 +553,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "__inherit__", "enabled": True, "credentials": {"api_key": "x"}}], "custom-model", ) @@ -563,7 +563,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new", "enabled": True}], "custom-model", ) @@ -585,7 +585,7 @@ def test_update_load_balancing_configs_should_create_from_existing_provider_cred "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -611,7 +611,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_provi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) @@ -631,7 +631,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_confi "tenant-1", "openai", "gpt-4o-mini", - 
ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, config_id="cfg-1", ) @@ -654,7 +654,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, config_id="cfg-1", ) @@ -662,7 +662,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) diff --git a/api/tests/unit_tests/services/test_model_provider_service.py b/api/tests/unit_tests/services/test_model_provider_service.py index 28d459eac9..9e4eeb2d6e 100644 --- a/api/tests/unit_tests/services/test_model_provider_service.py +++ b/api/tests/unit_tests/services/test_model_provider_service.py @@ -90,7 +90,7 @@ class TestModelProviderServiceConfiguration: ) manager.get_configurations.return_value = {"openai": allowed, "embedding": filtered} - result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM) assert len(result) == 1 assert result[0].provider == "openai" @@ -232,7 +232,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -245,7 +245,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, }, @@ -258,7 +258,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_name": "cred-a", @@ -277,7 +277,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": 
"tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_id": "cred-1", @@ -298,7 +298,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -311,7 +311,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -324,7 +324,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -337,7 +337,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", }, "delete_custom_model", @@ -425,7 +425,7 @@ class TestModelProviderServiceListingsAndDefaults: provider_configurations = SimpleNamespace(get_models=MagicMock(return_value=models)) manager.get_configurations.return_value = provider_configurations - result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) provider_configurations.get_models.assert_called_once_with(model_type=ModelType.LLM, only_active=True) assert len(result) == 1 @@ -495,7 +495,7 @@ class TestModelProviderServiceListingsAndDefaults: ), ) - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is not None assert result.model == "gpt-4o" @@ -506,7 +506,7 @@ class 
TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.return_value = None - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -514,7 +514,7 @@ class TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.side_effect = RuntimeError("boom") - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -523,7 +523,7 @@ class TestModelProviderServiceListingsAndDefaults: service.update_default_model_of_model_type( tenant_id="tenant-1", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, provider="openai", model="gpt-4o", ) @@ -593,7 +593,7 @@ class TestModelProviderServiceListingsAndDefaults: tenant_id="tenant-1", provider="openai", model="gpt-4o", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, ) getattr(provider_configuration, provider_method_name).assert_called_once_with( diff --git a/api/tests/unit_tests/services/test_trigger_provider_service.py b/api/tests/unit_tests/services/test_trigger_provider_service.py index 6eba60e5f1..4da4af2d93 100644 --- a/api/tests/unit_tests/services/test_trigger_provider_service.py +++ b/api/tests/unit_tests/services/test_trigger_provider_service.py @@ -325,7 +325,7 @@ def test_update_trigger_subscription_should_raise_error_when_name_conflicts( id="sub-1", name="old", provider_id="langgenius/github/github", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.side_effect = [subscription, object()] # found sub, name conflict _mock_get_trigger_provider(mocker, 
provider_controller) @@ -350,7 +350,7 @@ def test_update_trigger_subscription_should_update_fields_and_clear_cache( properties={"project": "enc-old"}, parameters={"event": "old"}, credentials={"api_key": "enc-old"}, - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credential_expires_at=0, expires_at=0, ) @@ -456,7 +456,7 @@ def test_delete_trigger_provider_should_delete_and_clear_cache_even_if_unsubscri id="sub-1", user_id="user-1", provider_id=str(provider_id), - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"token": "enc"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -492,7 +492,7 @@ def test_delete_trigger_provider_should_skip_unsubscribe_for_unauthorized( id="sub-2", user_id="user-1", provider_id=str(provider_id), - credential_type=CredentialType.UNAUTHORIZED.value, + credential_type=CredentialType.UNAUTHORIZED, credentials={}, to_entity=lambda: SimpleNamespace(id="sub-2"), ) @@ -527,7 +527,7 @@ def test_refresh_oauth_token_should_raise_error_for_non_oauth_credentials( mocker: MockerFixture, mock_session: MagicMock ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY) mock_session.scalar.return_value = subscription # Act + Assert @@ -545,7 +545,7 @@ def test_refresh_oauth_token_should_refresh_and_persist_new_credentials( subscription = SimpleNamespace( provider_id=str(provider_id), user_id="user-1", - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"access_token": "enc"}, credential_expires_at=0, ) @@ -613,7 +613,7 @@ def test_refresh_subscription_should_refresh_and_persist_properties( parameters={"event": "push"}, properties={"p": "enc"}, credentials={"c": "enc"}, - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.return_value = 
subscription _mock_get_trigger_provider(mocker, provider_controller) @@ -989,7 +989,7 @@ def test_verify_subscription_credentials_should_raise_when_api_key_validation_fa provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) provider_controller.validate_credentials.side_effect = RuntimeError("bad credentials") @@ -1012,7 +1012,7 @@ def test_verify_subscription_credentials_should_return_verified_when_api_key_val provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1036,7 +1036,7 @@ def test_verify_subscription_credentials_should_return_verified_for_non_api_key_ provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2.value, credentials={}) + subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2, credentials={}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1100,7 +1100,7 @@ def test_rebuild_trigger_subscription_should_raise_for_unsupported_credential_ty provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED.value) + subscription = 
SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1126,7 +1126,7 @@ def test_rebuild_trigger_subscription_should_raise_when_unsubscribe_fails( id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -1159,7 +1159,7 @@ def test_rebuild_trigger_subscription_should_resubscribe_and_update_existing_sub id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old-key"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) diff --git a/api/tests/unit_tests/services/test_webhook_service.py b/api/tests/unit_tests/services/test_webhook_service.py index ffdcc046f9..95edc436d7 100644 --- a/api/tests/unit_tests/services/test_webhook_service.py +++ b/api/tests/unit_tests/services/test_webhook_service.py @@ -140,7 +140,7 @@ class TestWebhookServiceUnit: assert args[1] == "text/plain" assert args[2] is webhook_trigger - def test_detect_binary_mimetype_uses_magic(self, monkeypatch): + def test_detect_binary_mimetype_uses_magic(self, monkeypatch: pytest.MonkeyPatch): """python-magic output should be used when available.""" fake_magic = MagicMock() fake_magic.from_buffer.return_value = "image/png" @@ -151,7 +151,7 @@ class TestWebhookServiceUnit: assert result == "image/png" fake_magic.from_buffer.assert_called_once() - def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch): + def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch: pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic is unavailable.""" monkeypatch.setattr("services.trigger.webhook_service.magic", None) @@ -159,7 
+159,7 @@ class TestWebhookServiceUnit: assert result == "application/octet-stream" - def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch): + def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch: pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic raises an exception.""" try: import magic as real_magic diff --git a/api/tests/unit_tests/services/test_workflow_service.py b/api/tests/unit_tests/services/test_workflow_service.py index feafada59a..1711e66b23 100644 --- a/api/tests/unit_tests/services/test_workflow_service.py +++ b/api/tests/unit_tests/services/test_workflow_service.py @@ -61,7 +61,7 @@ class TestWorkflowAssociatedDataFactory: def create_app_mock( app_id: str = "app-123", tenant_id: str = "tenant-456", - mode: str = AppMode.WORKFLOW.value, + mode: str = AppMode.WORKFLOW, workflow_id: str | None = None, **kwargs, ) -> MagicMock: @@ -93,7 +93,7 @@ class TestWorkflowAssociatedDataFactory: tenant_id: str = "tenant-456", app_id: str = "app-123", version: str = Workflow.VERSION_DRAFT, - workflow_type: str = WorkflowType.WORKFLOW.value, + workflow_type: str = WorkflowType.WORKFLOW, graph: dict[str, Any] | None = None, features: dict[str, Any] | None = None, unique_hash: str | None = None, @@ -584,7 +584,7 @@ class TestWorkflowService: id="published-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version="2026-03-19T00:00:00", graph=json.dumps(TestWorkflowAssociatedDataFactory.create_valid_workflow_graph()), features=json.dumps(legacy_features), @@ -597,7 +597,7 @@ class TestWorkflowService: id="draft-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version=Workflow.VERSION_DRAFT, graph=json.dumps({"nodes": [], "edges": []}), features=json.dumps({}), @@ -685,7 +685,7 @@ class TestWorkflowService: Different app modes have different feature configurations. 
This ensures the features match the expected schema for workflow apps. """ - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) features = {"file_upload": {"enabled": False}} with patch("services.workflow_service.WorkflowAppConfigManager.config_validate") as mock_validate: @@ -696,7 +696,7 @@ class TestWorkflowService: def test_validate_features_structure_advanced_chat_mode(self, workflow_service): """Test validate_features_structure for advanced chat mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT) features = {"opening_statement": "Hello"} with patch("services.workflow_service.AdvancedChatAppConfigManager.config_validate") as mock_validate: @@ -707,7 +707,7 @@ class TestWorkflowService: def test_validate_features_structure_invalid_mode_raises_error(self, workflow_service): """Test validate_features_structure raises error for invalid mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) features = {} with pytest.raises(ValueError, match="Invalid app mode"): @@ -1326,7 +1326,7 @@ class TestWorkflowService: The conversion creates equivalent workflow nodes from the chat configuration, giving users more control and customization options. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = { "name": "Converted Workflow", @@ -1337,7 +1337,7 @@ class TestWorkflowService: with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1353,13 +1353,13 @@ class TestWorkflowService: Completion apps are simpler (single prompt-response), so the conversion creates a basic workflow with fewer nodes. """ - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {"name": "Converted Workflow"} with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1373,7 +1373,7 @@ class TestWorkflowService: Only chat and completion apps can be converted to workflows. Apps that are already workflows or have other modes cannot be converted. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {} @@ -2087,7 +2087,7 @@ class TestSetupVariablePool: This helper initialises the VariablePool used for single-step workflow execution. """ - def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW.value) -> MagicMock: + def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW) -> MagicMock: wf = MagicMock(spec=Workflow) wf.app_id = "app-1" wf.id = "wf-1" @@ -2176,7 +2176,7 @@ class TestSetupVariablePool: from models.workflow import WorkflowType # Arrange - workflow = self._make_workflow(workflow_type=WorkflowType.CHAT.value) + workflow = self._make_workflow(workflow_type=WorkflowType.CHAT) # Act with ( @@ -2845,7 +2845,7 @@ class TestWorkflowServiceFreeNodeExecution: mock_node_cls.validate_node_data.assert_called_once_with(sentinel.adapted_node_data) mock_node_cls.assert_called_once_with( node_id="n-1", - config=sentinel.node_data, + data=sentinel.node_data, graph_init_params=mock_graph_init_context_cls.return_value.to_graph_init_params.return_value, graph_runtime_state=ANY, runtime=mock_runtime_cls.return_value, diff --git a/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py b/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py index ce0d94398d..c210db580e 100644 --- a/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py +++ b/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py @@ -180,7 +180,7 @@ class TestSetDefaultProvider: session.scalar.return_value = None with pytest.raises(ValueError, match="provider not found"): - BuiltinToolManageService.set_default_provider("t", "u", "p", "id") + BuiltinToolManageService.set_default_provider("t", "p", "id") @patch(f"{MODULE}.sessionmaker") @patch(f"{MODULE}.db") @@ -189,11 
+189,29 @@ class TestSetDefaultProvider: target = MagicMock() session.scalar.return_value = target - result = BuiltinToolManageService.set_default_provider("t", "u", "p", "id") + result = BuiltinToolManageService.set_default_provider("t", "p", "id") assert result == {"result": "success"} assert target.is_default is True + @patch(f"{MODULE}.sessionmaker") + @patch(f"{MODULE}.db") + def test_clear_default_is_tenant_scoped_not_user_scoped(self, mock_db, mock_sm_cls): + # Regression: clearing prior defaults must NOT filter by user_id, otherwise + # two workspace members can each leave their own credential as default at + # the same time (the default flag is tenant-scoped, not per-user). + session = _mock_sessionmaker(mock_sm_cls) + session.scalar.return_value = MagicMock() + + BuiltinToolManageService.set_default_provider("tenant-1", "google", "cred-id") + + session.execute.assert_called_once() + update_stmt = session.execute.call_args.args[0] + compiled = str(update_stmt.compile(compile_kwargs={"literal_binds": True})) + assert "user_id" not in compiled + assert "tenant_id" in compiled + assert "provider" in compiled + class TestUpdateBuiltinToolProvider: @patch(f"{MODULE}.sessionmaker") diff --git a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py index 663eec6a06..b5b9f0bd97 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py @@ -398,7 +398,7 @@ class TestWorkflowDraftVariableService: self, mock_engine, mock_session, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable when execution record doesn't exist""" mock_repo_session = Mock(spec=Session) @@ -435,7 +435,7 @@ class TestWorkflowDraftVariableService: def test_reset_node_variable_with_valid_execution_record( self, mock_session, - monkeypatch, + 
monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable with valid execution record - should restore from execution""" mock_repo_session = Mock(spec=Session) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py index dfdbd9acd6..17e9a077d6 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py @@ -414,8 +414,8 @@ def test_parse_event_message_should_parse_only_json_object( def test_is_terminal_event_should_recognize_finished_and_optional_paused_events() -> None: # Arrange - finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} # Act is_finished = service_module._is_terminal_event(finished_event, close_on_pause=False) @@ -426,7 +426,7 @@ def test_is_terminal_event_should_recognize_finished_and_optional_paused_events( assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, close_on_pause=True) is False + assert service_module._is_terminal_event(StreamEvent.PING, close_on_pause=True) is False def test_apply_message_context_should_update_payload_when_context_exists() -> None: @@ -569,7 +569,7 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) # Act @@ -584,9 +584,9 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even ) 
# Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -643,7 +643,7 @@ def test_build_workflow_event_stream_should_emit_periodic_ping_and_stop_after_id ) # Assert - assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert buffer_state.stop_event.is_set() is True @@ -686,7 +686,7 @@ def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( ) # Assert - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True @@ -706,7 +706,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -729,7 +729,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( ) # Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None @@ -779,7 +779,7 @@ def 
test_build_snapshot_events_preserves_public_form_token(monkeypatch: pytest.M session_maker=cast(sessionmaker[Session], session_maker), ) - assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED.value + assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED assert events[-2]["data"]["form_token"] == "wtok" assert events[-2]["data"]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) pause_data = events[-1]["data"] @@ -837,6 +837,6 @@ def test_build_workflow_event_stream_loads_pause_tokens_without_flask_app_contex ) pause_event = cast(Mapping[str, Any], events[-1]) - assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED.value + assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED assert pause_event["data"]["reasons"][0]["form_token"] == "wtok" assert pause_event["data"]["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py index d2634d7d7b..4d711f1bf8 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py @@ -215,8 +215,8 @@ class TestWorkflowEventSnapshotHelpers: assert result == expected def test_is_terminal_event_should_recognize_finished_and_optional_paused_events(self) -> None: - finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} is_finished = service_module._is_terminal_event(finished_event, include_paused=False) paused_without_flag = service_module._is_terminal_event(paused_event, include_paused=False) @@ -225,7 +225,7 @@ class TestWorkflowEventSnapshotHelpers: 
assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, include_paused=True) is False + assert service_module._is_terminal_event(StreamEvent.PING, include_paused=True) is False def test_apply_message_context_should_update_payload_when_context_exists(self) -> None: payload: dict[str, Any] = {"event": "workflow_started"} @@ -352,7 +352,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) events = list( @@ -365,9 +365,9 @@ class TestBuildWorkflowEventStream: ) ) - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -421,7 +421,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( @@ -461,7 +461,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( @@ -480,7 +480,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", 
MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -501,5 +501,5 @@ class TestBuildWorkflowEventStream: ) ) - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None diff --git a/api/tests/unit_tests/tasks/test_workflow_execute_task.py b/api/tests/unit_tests/tasks/test_workflow_execute_task.py index 72508bef52..2544c9d61a 100644 --- a/api/tests/unit_tests/tasks/test_workflow_execute_task.py +++ b/api/tests/unit_tests/tasks/test_workflow_execute_task.py @@ -122,7 +122,7 @@ def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(m workflow_run = SimpleNamespace( workflow_id="wf-id", app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) @@ -208,7 +208,7 @@ def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversat workflow_run = SimpleNamespace( workflow_id="wf-id", app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) diff --git a/api/uv.lock b/api/uv.lock index 6f75c9f6fe..ad9ce2c4a4 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -50,7 +50,10 @@ members = [ "dify-vdb-vikingdb", "dify-vdb-weaviate", ] -overrides = [{ name = "pyarrow", specifier = ">=18.0.0" }] +overrides = [ + { name = "litellm", specifier = ">=1.83.7" }, + { name = "pyarrow", specifier = 
">=18.0.0" }, +] [[package]] name = "abnf" @@ -889,14 +892,14 @@ wheels = [ [[package]] name = "click" -version = "8.3.1" +version = "8.1.8" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, ] [[package]] @@ -1594,7 +1597,7 @@ requires-dist = [ { name = "gmpy2", specifier = ">=2.3.0" }, { name = "google-api-python-client", specifier = ">=2.195.0" }, { name = "google-cloud-aiplatform", specifier = ">=1.149.0,<2.0.0" }, - { name = "graphon", specifier = "~=0.2.2" }, + { name = "graphon", specifier = "~=0.3.0" }, { name = "gunicorn", specifier = ">=25.3.0" }, { name = "httpx", extras = ["socks"], specifier = ">=0.28.1,<1.0.0" }, { name = "httpx-sse", specifier = "~=0.4.0" }, @@ -2657,14 +2660,14 @@ wheels = [ [[package]] name = "gitpython" -version = 
"3.1.49" +version = "3.1.50" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "gitdb" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e1/63/210aaa302d6a0a78daa67c5c15bbac2cad361722841278b0209b6da20855/gitpython-3.1.49.tar.gz", hash = "sha256:42f9399c9eb33fc581014bedd76049dfbaf6375aa2a5754575966387280315e1", size = 219367, upload-time = "2026-04-29T00:31:20.478Z" } +sdist = { url = "https://files.pythonhosted.org/packages/33/f6/354ae6491228b5eb40e10d89c4d13c651fe1cf7556e35ebdded50cff57ce/gitpython-3.1.50.tar.gz", hash = "sha256:80da2d12504d52e1f998772dc5baf6e553f8d2fcfe1fcc226c9d9a2ee3372dcc", size = 219798, upload-time = "2026-05-06T04:01:26.571Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fd/6f/b842bfa6f21d6f87c57f9abf7194225e55279d96d869775e19e9f7236fc5/gitpython-3.1.49-py3-none-any.whl", hash = "sha256:024b0422d7f84d15cd794844e029ffebd4c5d42a7eb9b936b458697ef550a02c", size = 212190, upload-time = "2026-04-29T00:31:18.412Z" }, + { url = "https://files.pythonhosted.org/packages/20/7a/1c6e3562dfd8950adbb11ffbc65d21e7c89d01a6e4f137fa981056de25c5/gitpython-3.1.50-py3-none-any.whl", hash = "sha256:d352abe2908d07355014abdd21ddf798c2a961469239afec4962e9da884858f9", size = 212507, upload-time = "2026-05-06T04:01:23.799Z" }, ] [[package]] @@ -2937,7 +2940,7 @@ httpx = [ [[package]] name = "graphon" -version = "0.2.2" +version = "0.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "charset-normalizer" }, @@ -2958,9 +2961,9 @@ dependencies = [ { name = "unstructured", extra = ["docx", "epub", "md", "ppt", "pptx"] }, { name = "webvtt-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/08/50/e745a79c5f742f88f6011a1f7c9ba2c2f9cc1beedd982f0b192f1ab8c748/graphon-0.2.2.tar.gz", hash = "sha256:141f0de536171850f1af6f738dc66f0285aadd3c097f1dad2a038636789e0aa5", size = 236360, upload-time = "2026-04-17T08:52:28.047Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/bf/62/83593d6e7a139ff124711ea05882cadca7065c11a38763aa9360d7e76804/graphon-0.3.0.tar.gz", hash = "sha256:cd38f842ae3dcfa956428b952efbe2a3ea9c1581446647142accbbdeb638b876", size = 241176, upload-time = "2026-04-21T15:18:48.291Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/de/89/a6340afdaf5169d17a318e00fc685fb67ed99baa602c2cbbbf6af6a76096/graphon-0.2.2-py3-none-any.whl", hash = "sha256:754e544d08779138f99eac6547ab08559463680e2c76488b05e1c978210392b4", size = 340808, upload-time = "2026-04-17T08:52:26.5Z" }, + { url = "https://files.pythonhosted.org/packages/b3/f7/81ee8f0368aa6a2d47f97fecc5d4a12865c987906798cbddd0e3b8387f33/graphon-0.3.0-py3-none-any.whl", hash = "sha256:9cca45ebab2a79fd4d04432f55b5b962e9e4f34fa037cc20fee7f18ec80eaa5d", size = 348486, upload-time = "2026-04-21T15:18:46.737Z" }, ] [[package]] @@ -3355,14 +3358,14 @@ wheels = [ [[package]] name = "importlib-metadata" -version = "8.4.0" +version = "8.5.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c0/bd/fa8ce65b0a7d4b6d143ec23b0f5fd3f7ab80121078c465bc02baeaab22dc/importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5", size = 54320, upload-time = "2024-08-20T17:11:42.348Z" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304, upload-time = "2024-09-11T14:56:08.937Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/14/362d31bf1076b21e1bcdcb0dc61944822ff263937b804a79231df2774d28/importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1", size = 26269, upload-time = "2024-08-20T17:11:41.102Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514, upload-time = "2024-09-11T14:56:07.019Z" }, ] [[package]] @@ -3503,7 +3506,7 @@ wheels = [ [[package]] name = "jsonschema" -version = "4.25.1" +version = "4.23.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -3511,9 +3514,9 @@ dependencies = [ { name = "referencing" }, { name = "rpds-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +sdist = { url = "https://files.pythonhosted.org/packages/38/2e/03362ee4034a4c917f697890ccd4aec0800ccf9ded7f511971c75451deec/jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4", size = 325778, upload-time = "2024-07-08T18:40:05.546Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, + { url = "https://files.pythonhosted.org/packages/69/4a/4f9dbeb84e8850557c02365a0eee0649abe5eb1d84af92a25731c6c0f922/jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566", size = 88462, upload-time = "2024-07-08T18:40:00.165Z" }, ] [[package]] @@ -3654,7 +3657,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.83.0" +version = "1.83.14" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -3670,9 +3673,9 @@ 
dependencies = [ { name = "tiktoken" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/22/92/6ce9737554994ca8e536e5f4f6a87cc7c4774b656c9eb9add071caf7d54b/litellm-1.83.0.tar.gz", hash = "sha256:860bebc76c4bb27b4cf90b4a77acd66dba25aced37e3db98750de8a1766bfb7a", size = 17333062, upload-time = "2026-03-31T05:08:25.331Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/7c/c095649380adc96c8630273c1768c2ad1e74aa2ee1dd8dd05d218a60569f/litellm-1.83.14.tar.gz", hash = "sha256:24aef9b47cdc424c833e32f3727f411741c690832cd1fe4405e0077144fe09c9", size = 14836599, upload-time = "2026-04-26T03:16:10.176Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/19/2c/a670cc050fcd6f45c6199eb99e259c73aea92edba8d5c2fc1b3686d36217/litellm-1.83.0-py3-none-any.whl", hash = "sha256:88c536d339248f3987571493015784671ba3f193a328e1ea6780dbebaa2094a8", size = 15610306, upload-time = "2026-03-31T05:08:21.987Z" }, + { url = "https://files.pythonhosted.org/packages/7f/5c/1b5691575420135e90578543b2bf219497caa33cfd0af64cb38f30288450/litellm-1.83.14-py3-none-any.whl", hash = "sha256:92b11ba2a32cf80707ddf388d18526696c7999a21b418c5e3b6eda1243d2cfdb", size = 16457054, upload-time = "2026-04-26T03:16:05.72Z" }, ] [[package]] @@ -4135,7 +4138,7 @@ wheels = [ [[package]] name = "openai" -version = "2.8.1" +version = "2.24.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -4147,9 +4150,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d5/e4/42591e356f1d53c568418dc7e30dcda7be31dd5a4d570bca22acb0525862/openai-2.8.1.tar.gz", hash = "sha256:cb1b79eef6e809f6da326a7ef6038719e35aa944c42d081807bfa1be8060f15f", size = 602490, upload-time = "2025-11-17T22:39:59.549Z" } +sdist = { url = "https://files.pythonhosted.org/packages/55/13/17e87641b89b74552ed408a92b231283786523edddc95f3545809fab673c/openai-2.24.0.tar.gz", hash = 
"sha256:1e5769f540dbd01cb33bc4716a23e67b9d695161a734aff9c5f925e2bf99a673", size = 658717, upload-time = "2026-02-24T20:02:07.958Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/4f/dbc0c124c40cb390508a82770fb9f6e3ed162560181a85089191a851c59a/openai-2.8.1-py3-none-any.whl", hash = "sha256:c6c3b5a04994734386e8dad3c00a393f56d3b68a27cd2e8acae91a59e4122463", size = 1022688, upload-time = "2025-11-17T22:39:57.675Z" }, + { url = "https://files.pythonhosted.org/packages/c9/30/844dc675ee6902579b8eef01ed23917cc9319a1c9c0c14ec6e39340c96d0/openai-2.24.0-py3-none-any.whl", hash = "sha256:fed30480d7d6c884303287bde864980a4b137b60553ffbcf9ab4a233b7a73d94", size = 1120122, upload-time = "2026-02-24T20:02:05.669Z" }, ] [[package]] @@ -6447,27 +6450,28 @@ wheels = [ [[package]] name = "tokenizers" -version = "0.22.1" +version = "0.22.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } +sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, upload-time = "2026-01-05T10:45:15.988Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/33/f4b2d94ada7ab297328fc671fed209368ddb82f965ec2224eb1892674c3a/tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73", size = 3069318, upload-time = "2025-09-19T09:49:11.848Z" }, - { url = 
"https://files.pythonhosted.org/packages/1c/58/2aa8c874d02b974990e89ff95826a4852a8b2a273c7d1b4411cdd45a4565/tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc", size = 2926478, upload-time = "2025-09-19T09:49:09.759Z" }, - { url = "https://files.pythonhosted.org/packages/1e/3b/55e64befa1e7bfea963cf4b787b2cea1011362c4193f5477047532ce127e/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a", size = 3256994, upload-time = "2025-09-19T09:48:56.701Z" }, - { url = "https://files.pythonhosted.org/packages/71/0b/fbfecf42f67d9b7b80fde4aabb2b3110a97fac6585c9470b5bff103a80cb/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7", size = 3153141, upload-time = "2025-09-19T09:48:59.749Z" }, - { url = "https://files.pythonhosted.org/packages/17/a9/b38f4e74e0817af8f8ef925507c63c6ae8171e3c4cb2d5d4624bf58fca69/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21", size = 3508049, upload-time = "2025-09-19T09:49:05.868Z" }, - { url = "https://files.pythonhosted.org/packages/d2/48/dd2b3dac46bb9134a88e35d72e1aa4869579eacc1a27238f1577270773ff/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214", size = 3710730, upload-time = "2025-09-19T09:49:01.832Z" }, - { url = "https://files.pythonhosted.org/packages/93/0e/ccabc8d16ae4ba84a55d41345207c1e2ea88784651a5a487547d80851398/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f", size = 3412560, upload-time = "2025-09-19T09:49:03.867Z" }, - { url = 
"https://files.pythonhosted.org/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4", size = 3250221, upload-time = "2025-09-19T09:49:07.664Z" }, - { url = "https://files.pythonhosted.org/packages/d7/a6/2c8486eef79671601ff57b093889a345dd3d576713ef047776015dc66de7/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879", size = 9345569, upload-time = "2025-09-19T09:49:14.214Z" }, - { url = "https://files.pythonhosted.org/packages/6b/16/32ce667f14c35537f5f605fe9bea3e415ea1b0a646389d2295ec348d5657/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446", size = 9271599, upload-time = "2025-09-19T09:49:16.639Z" }, - { url = "https://files.pythonhosted.org/packages/51/7c/a5f7898a3f6baa3fc2685c705e04c98c1094c523051c805cdd9306b8f87e/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a", size = 9533862, upload-time = "2025-09-19T09:49:19.146Z" }, - { url = "https://files.pythonhosted.org/packages/36/65/7e75caea90bc73c1dd8d40438adf1a7bc26af3b8d0a6705ea190462506e1/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390", size = 9681250, upload-time = "2025-09-19T09:49:21.501Z" }, - { url = "https://files.pythonhosted.org/packages/30/2c/959dddef581b46e6209da82df3b78471e96260e2bc463f89d23b1bf0e52a/tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82", size = 2472003, upload-time = "2025-09-19T09:49:27.089Z" }, - { url = 
"https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" }, + { url = "https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, upload-time = "2026-01-05T10:41:00.276Z" }, + { url = "https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" }, + { url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" }, + { url = "https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" }, + { url = 
"https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" }, + { url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" }, + { url = "https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = "2026-01-05T10:40:58.331Z" }, + { url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" }, + { url = "https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" }, + { url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" }, + { url = "https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" }, + { url = "https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" }, ] [[package]] diff --git a/dev/pytest/pytest_config_tests.py b/dev/pytest/pytest_config_tests.py index d56cceff5e..b136f09c61 100644 --- a/dev/pytest/pytest_config_tests.py +++ b/dev/pytest/pytest_config_tests.py @@ -93,10 +93,16 @@ BASE_API_AND_DOCKER_COMPOSE_CONFIG_SET_DIFF: frozenset[str] = frozenset( API_CONFIG_SET = set(dotenv_values(Path("api") / Path(".env.example")).keys()) DOCKER_CONFIG_SET = set(dotenv_values(Path("docker") / Path(".env.example")).keys()) -DOCKER_COMPOSE_CONFIG_SET = set() +DOCKER_COMPOSE_CONFIG_SET = set(DOCKER_CONFIG_SET) -with open(Path("docker") / Path("docker-compose.yaml")) as f: - DOCKER_COMPOSE_CONFIG_SET = set(yaml.safe_load(f.read())["x-shared-env"].keys()) +# Read environment variables from the split env files used by docker-compose +# 
Walk through all .env.example files in subdirectories (per-module structure) +envs_dir = Path("docker") / Path("envs") +if envs_dir.exists(): + for env_file_path in envs_dir.rglob("*.env.example"): + env_keys = set(dotenv_values(env_file_path).keys()) + DOCKER_CONFIG_SET.update(env_keys) + DOCKER_COMPOSE_CONFIG_SET.update(env_keys) def test_yaml_config(): diff --git a/docker/.env.default b/docker/.env.default deleted file mode 100644 index 6f6683b9f5..0000000000 --- a/docker/.env.default +++ /dev/null @@ -1,51 +0,0 @@ -# ------------------------------------------------------------------ -# Minimal defaults for Docker Compose deployments. -# -# Keep local changes in .env. Use .env.example as the full reference -# for advanced and service-specific settings. -# ------------------------------------------------------------------ - -# Public URLs used when Dify generates links. Change these together when -# exposing Dify under another hostname, IP address, or port. -CONSOLE_WEB_URL=http://localhost -SERVICE_API_URL=http://localhost -APP_WEB_URL=http://localhost -FILES_URL=http://localhost -INTERNAL_FILES_URL=http://api:5001 -TRIGGER_URL=http://localhost -ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} -NEXT_PUBLIC_SOCKET_URL=ws://localhost -EXPOSE_PLUGIN_DEBUGGING_HOST=localhost -EXPOSE_PLUGIN_DEBUGGING_PORT=5003 - -# Built-in metadata database defaults. -DB_TYPE=postgresql -DB_USERNAME=postgres -DB_PASSWORD=difyai123456 -DB_HOST=db_postgres -DB_PORT=5432 -DB_DATABASE=dify - -# Built-in Redis defaults. -REDIS_HOST=redis -REDIS_PORT=6379 -REDIS_PASSWORD=difyai123456 - -# Default file storage. -STORAGE_TYPE=opendal -OPENDAL_SCHEME=fs -OPENDAL_FS_ROOT=storage - -# Default vector database. -VECTOR_STORE=weaviate - -# Internal service authentication. Paired values must match. -PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi -PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 - -# Host ports. 
-EXPOSE_NGINX_PORT=80 -EXPOSE_NGINX_SSL_PORT=443 - -# Docker Compose profiles for bundled services. -COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql} diff --git a/docker/.env.example b/docker/.env.example index 122228cdd1..82bd837ffb 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -1,1249 +1,157 @@ -# ------------------------------ -# Environment Variables for API service & worker -# ------------------------------ +# ------------------------------------------------------------------ +# Essential defaults for Docker Compose deployments. +# +# For a default deployment, copy this file to .env and run: +# docker compose up -d +# +# Optional and provider-specific variables live under docker/envs/. +# Copy an optional *.env.example file beside itself without the +# .example suffix when you need those advanced settings. +# Values in docker/.env take precedence over docker/envs/*.env files. +# ------------------------------------------------------------------ -# ------------------------------ -# Common Variables -# ------------------------------ - -# The backend URL of the console API, -# used to concatenate the authorization callback. -# If empty, it is the same domain. -# Example: https://api.console.dify.ai +# Core service URLs CONSOLE_API_URL= - -# The front-end URL of the console web, -# used to concatenate some front-end addresses and for CORS configuration use. -# If empty, it is the same domain. -# Example: https://console.dify.ai CONSOLE_WEB_URL= - -# Service API Url, -# used to display Service API Base Url to the front-end. -# If empty, it is the same domain. -# Example: https://api.dify.ai SERVICE_API_URL= - -# Trigger external URL -# used to display trigger endpoint API Base URL to the front-end. -# Example: https://api.dify.ai TRIGGER_URL=http://localhost - -# WebApp API backend Url, -# used to declare the back-end URL for the front-end API. -# If empty, it is the same domain. 
-# Example: https://api.app.dify.ai APP_API_URL= - -# WebApp Url, -# used to display WebAPP API Base Url to the front-end. -# If empty, it is the same domain. -# Example: https://app.dify.ai APP_WEB_URL= - -# File preview or download Url prefix. -# used to display File preview or download Url to the front-end or as Multi-model inputs; -# Url is signed and has expiration time. -# Setting FILES_URL is required for file processing plugins. -# - For https://example.com, use FILES_URL=https://example.com -# - For http://example.com, use FILES_URL=http://example.com -# Recommendation: use a dedicated domain (e.g., https://upload.example.com). -# Alternatively, use http://:5001 or http://api:5001, -# ensuring port 5001 is externally accessible (see docker-compose.yaml). FILES_URL= - -# INTERNAL_FILES_URL is used for plugin daemon communication within Docker network. -# Set this to the internal Docker service URL for proper plugin file access. -# Example: INTERNAL_FILES_URL=http://api:5001 INTERNAL_FILES_URL= +ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} +NEXT_PUBLIC_SOCKET_URL=ws://localhost -# Ensure UTF-8 encoding +# Runtime and security LANG=C.UTF-8 LC_ALL=C.UTF-8 PYTHONIOENCODING=utf-8 - -# Set UV cache directory to avoid permission issues with non-existent home directory UV_CACHE_DIR=/tmp/.uv-cache - -# ------------------------------ -# Server Configuration -# ------------------------------ - -# The log level for the application. -# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` -LOG_LEVEL=INFO -# Log output format: text or json -LOG_OUTPUT_FORMAT=text -# Log file path -LOG_FILE=/app/logs/server.log -# Log file max size, the unit is MB -LOG_FILE_MAX_SIZE=20 -# Log file max backup count -LOG_FILE_BACKUP_COUNT=5 -# Log dateformat -LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S -# Log Timezone -LOG_TZ=UTC - -# Debug mode, default is false. 
-# It is recommended to turn on this configuration for local development -# to prevent some problems caused by monkey patch. -DEBUG=false - -# Flask debug mode, it can output trace information at the interface when turned on, -# which is convenient for debugging. -FLASK_DEBUG=false - -# Enable request logging, which will log the request and response information. -# And the log level is DEBUG -ENABLE_REQUEST_LOGGING=False - -# A secret key that is used for securely signing the session cookie -# and encrypting sensitive information on the database. -# You can generate a strong key using `openssl rand -base64 42`. SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U - -# Password for admin user initialization. -# If left unset, admin user will not be prompted for a password -# when creating the initial admin account. -# The length of the password cannot exceed 30 characters. INIT_PASSWORD= - -# Deployment environment. -# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`. -# Testing environment. There will be a distinct color label on the front-end page, -# indicating that this environment is a testing environment. DEPLOY_ENV=PRODUCTION - -# Whether to enable the version check policy. -# If set to empty, https://updates.dify.ai will be called for version check. CHECK_UPDATE_URL=https://updates.dify.ai - -# Used to change the OpenAI base address, default is https://api.openai.com/v1. -# When OpenAI cannot be accessed in China, replace it with a domestic mirror address, -# or when a local model provides OpenAI compatible API, it can be replaced. OPENAI_API_BASE=https://api.openai.com/v1 - -# When enabled, migrations will be executed prior to application startup -# and the application will start after the migrations have completed. MIGRATION_ENABLED=true - -# File Access Time specifies a time interval in seconds for the file to be accessed. -# The default value is 300 seconds. 
FILES_ACCESS_TIMEOUT=300 - -# Collaboration mode toggle -# To open collaboration features, you also need to set SERVER_WORKER_CLASS=geventwebsocket.gunicorn.workers.GeventWebSocketWorker ENABLE_COLLABORATION_MODE=false -# Access token expiration time in minutes -ACCESS_TOKEN_EXPIRE_MINUTES=60 - -# Refresh token expiration time in days -REFRESH_TOKEN_EXPIRE_DAYS=30 - -# The default number of active requests for the application, where 0 means unlimited, should be a non-negative integer. -APP_DEFAULT_ACTIVE_REQUESTS=0 -# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer. -APP_MAX_ACTIVE_REQUESTS=0 -APP_MAX_EXECUTION_TIME=1200 - -# ------------------------------ -# Container Startup Related Configuration -# Only effective when starting with docker image or docker-compose. -# ------------------------------ - -# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed. +# Logging and server workers +LOG_LEVEL=INFO +LOG_OUTPUT_FORMAT=text +LOG_FILE=/app/logs/server.log +LOG_FILE_MAX_SIZE=20 +LOG_FILE_BACKUP_COUNT=5 +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S +LOG_TZ=UTC +DEBUG=false +FLASK_DEBUG=false +ENABLE_REQUEST_LOGGING=False DIFY_BIND_ADDRESS=0.0.0.0 - -# API service binding port number, default 5001. DIFY_PORT=5001 - -# The number of API server workers, i.e., the number of workers. -# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent -# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers SERVER_WORKER_AMOUNT=1 - -# Defaults to gevent. If using windows, it can be switched to sync or solo. -# -# Warning: Changing this parameter requires disabling patching for -# psycopg2 and gRPC (see `gunicorn.conf.py` and `celery_entrypoint.py`). -# Modifying it may also decrease throughput. -# -# It is strongly discouraged to change this parameter. 
-# If enable collaboration mode, it must be set to geventwebsocket.gunicorn.workers.GeventWebSocketWorker SERVER_WORKER_CLASS=gevent - -# Default number of worker connections, the default is 10. SERVER_WORKER_CONNECTIONS=10 - -# Similar to SERVER_WORKER_CLASS. -# If using windows, it can be switched to sync or solo. -# -# Warning: Changing this parameter requires disabling patching for -# psycopg2 and gRPC (see `gunicorn_conf.py` and `celery_entrypoint.py`). -# Modifying it may also decrease throughput. -# -# It is strongly discouraged to change this parameter. -CELERY_WORKER_CLASS= - -# Request handling timeout. The default is 200, -# it is recommended to set it to 360 to support a longer sse connection time. GUNICORN_TIMEOUT=360 - -# The number of Celery workers. The default is 4 for development environments -# to allow parallel processing of workflows, document indexing, and other async tasks. -# Adjust based on your system resources and workload requirements. +CELERY_WORKER_CLASS= CELERY_WORKER_AMOUNT=4 - -# Flag indicating whether to enable autoscaling of Celery workers. -# -# Autoscaling is useful when tasks are CPU intensive and can be dynamically -# allocated and deallocated based on the workload. -# -# When autoscaling is enabled, the maximum and minimum number of workers can -# be specified. The autoscaling algorithm will dynamically adjust the number -# of workers within the specified range. -# -# Default is false (i.e., autoscaling is disabled). -# -# Example: -# CELERY_AUTO_SCALE=true CELERY_AUTO_SCALE=false - -# The maximum number of Celery workers that can be autoscaled. -# This is optional and only used when autoscaling is enabled. -# Default is not set. CELERY_MAX_WORKERS= - -# The minimum number of Celery workers that can be autoscaled. -# This is optional and only used when autoscaling is enabled. -# Default is not set. 
CELERY_MIN_WORKERS= +COMPOSE_WORKER_HEALTHCHECK_DISABLED=true +COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s +COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s -# API Tool configuration -API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 -API_TOOL_DEFAULT_READ_TIMEOUT=60 - -# ------------------------------- -# Datasource Configuration -# -------------------------------- -ENABLE_WEBSITE_JINAREADER=true -ENABLE_WEBSITE_FIRECRAWL=true -ENABLE_WEBSITE_WATERCRAWL=true - -# Enable inline LaTeX rendering with single dollar signs ($...$) in the web frontend -# Default is false for security reasons to prevent conflicts with regular text -NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false - -# ------------------------------ -# Database Configuration -# The database uses PostgreSQL or MySQL. OceanBase and seekdb are also supported. Please use the public schema. -# It is consistent with the configuration in the database service below. -# You can adjust the database configuration according to your needs. -# ------------------------------ - -# Database type, supported values are `postgresql`, `mysql`, `oceanbase`, `seekdb` +# Database DB_TYPE=postgresql -# For MySQL, only `root` user is supported for now DB_USERNAME=postgres DB_PASSWORD=difyai123456 DB_HOST=db_postgres DB_PORT=5432 DB_DATABASE=dify - -# The size of the database connection pool. -# The default is 30 connections, which can be appropriately increased. SQLALCHEMY_POOL_SIZE=30 -# The default is 10 connections, which allows temporary overflow beyond the pool size. SQLALCHEMY_MAX_OVERFLOW=10 -# Database connection pool recycling time, the default is 3600 seconds. SQLALCHEMY_POOL_RECYCLE=3600 -# Whether to print SQL, default is false. 
SQLALCHEMY_ECHO=false -# If True, will test connections for liveness upon each checkout SQLALCHEMY_POOL_PRE_PING=false -# Whether to enable the Last in first out option or use default FIFO queue if is false SQLALCHEMY_POOL_USE_LIFO=false -# Number of seconds to wait for a connection from the pool before raising a timeout error. -# Default is 30 SQLALCHEMY_POOL_TIMEOUT=30 - -# Maximum number of connections to the database -# Default is 100 -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS +SQLALCHEMY_POOL_RESET_ON_RETURN=rollback +PGDATA=/var/lib/postgresql/data/pgdata POSTGRES_MAX_CONNECTIONS=200 - -# Sets the amount of shared memory used for postgres's shared buffers. -# Default is 128MB -# Recommended value: 25% of available memory -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS POSTGRES_SHARED_BUFFERS=128MB - -# Sets the amount of memory used by each database worker for working space. -# Default is 4MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM POSTGRES_WORK_MEM=4MB - -# Sets the amount of memory reserved for maintenance activities. -# Default is 64MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM POSTGRES_MAINTENANCE_WORK_MEM=64MB - -# Sets the planner's assumption about the effective cache size. -# Default is 4096MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB - -# Sets the maximum allowed duration of any statement before termination. -# Default is 0 (no timeout). -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT -# A value of 0 prevents the server from timing out statements. 
POSTGRES_STATEMENT_TIMEOUT=0 - -# Sets the maximum allowed duration of any idle in-transaction session before termination. -# Default is 0 (no timeout). -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT -# A value of 0 prevents the server from terminating idle sessions. POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0 -# MySQL Performance Configuration -# Maximum number of connections to MySQL -# -# Default is 1000 -MYSQL_MAX_CONNECTIONS=1000 - -# InnoDB buffer pool size -# Default is 512M -# Recommended value: 70-80% of available memory for dedicated MySQL server -# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size -MYSQL_INNODB_BUFFER_POOL_SIZE=512M - -# InnoDB log file size -# Default is 128M -# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size -MYSQL_INNODB_LOG_FILE_SIZE=128M - -# InnoDB flush log at transaction commit -# Default is 2 (flush to OS cache, sync every second) -# Options: 0 (no flush), 1 (flush and sync), 2 (flush to OS cache) -# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit -MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2 - -# ------------------------------ -# Redis Configuration -# This Redis configuration is used for caching and for pub/sub during conversation. 
-# ------------------------------ - +# Redis and Celery REDIS_HOST=redis REDIS_PORT=6379 REDIS_USERNAME= REDIS_PASSWORD=difyai123456 REDIS_USE_SSL=false -# SSL configuration for Redis (when REDIS_USE_SSL=true) REDIS_SSL_CERT_REQS=CERT_NONE -# Options: CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED REDIS_SSL_CA_CERTS= -# Path to CA certificate file for SSL verification REDIS_SSL_CERTFILE= -# Path to client certificate file for SSL authentication REDIS_SSL_KEYFILE= -# Path to client private key file for SSL authentication REDIS_DB=0 -# Optional global prefix for Redis keys, topics, streams, and Celery Redis transport artifacts. -# Leave empty to preserve current unprefixed behavior. REDIS_KEY_PREFIX= -# Optional: limit total Redis connections used by API/Worker (unset for default) -# Align with API's REDIS_MAX_CONNECTIONS in configs REDIS_MAX_CONNECTIONS= - -# Whether to use Redis Sentinel mode. -# If set to true, the application will automatically discover and connect to the master node through Sentinel. -REDIS_USE_SENTINEL=false - -# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port. -# Format: `:,:,:` -REDIS_SENTINELS= -REDIS_SENTINEL_SERVICE_NAME= -REDIS_SENTINEL_USERNAME= -REDIS_SENTINEL_PASSWORD= -REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 - -# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port. 
-# Format: `:,:,:` -REDIS_USE_CLUSTERS=false -REDIS_CLUSTERS= -REDIS_CLUSTERS_PASSWORD= - -# Redis connection and retry configuration -# max redis retry REDIS_RETRY_RETRIES=3 -# Base delay (in seconds) for exponential backoff on retries REDIS_RETRY_BACKOFF_BASE=1.0 -# Cap (in seconds) for exponential backoff on retries REDIS_RETRY_BACKOFF_CAP=10.0 -# Timeout (in seconds) for Redis socket operations REDIS_SOCKET_TIMEOUT=5.0 -# Timeout (in seconds) for establishing a Redis connection REDIS_SOCKET_CONNECT_TIMEOUT=5.0 -# Interval (in seconds) for Redis health checks REDIS_HEALTH_CHECK_INTERVAL=30 - -# ------------------------------ -# Celery Configuration -# ------------------------------ - -# Use standalone redis as the broker, and redis db 1 for celery broker. (redis_username is usually set by default as empty) -# Format as follows: `redis://:@:/`. -# Example: redis://:difyai123456@redis:6379/1 -# If use Redis Sentinel, format as follows: `sentinel://:@:/` -# For high availability, you can configure multiple Sentinel nodes (if provided) separated by semicolons like below example: -# Example: sentinel://:difyai123456@localhost:26379/1;sentinel://:difyai12345@localhost:26379/1;sentinel://:difyai12345@localhost:26379/1 CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1 CELERY_BACKEND=redis BROKER_USE_SSL=false - -# If you are using Redis Sentinel for high availability, configure the following settings. -CELERY_USE_SENTINEL=false -CELERY_SENTINEL_MASTER_NAME= -CELERY_SENTINEL_PASSWORD= -CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 -# e.g. {"tasks.add": {"rate_limit": "10/s"}} CELERY_TASK_ANNOTATIONS=null +EVENT_BUS_REDIS_URL= +EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub +EVENT_BUS_REDIS_USE_CLUSTERS=false -# ------------------------------ -# CORS Configuration -# Used to set the front-end cross-domain access policy. -# ------------------------------ - -# Specifies the allowed origins for cross-origin requests to the Web API, -# e.g. https://dify.app or * for all origins. 
+# Web and app limits WEB_API_CORS_ALLOW_ORIGINS=* - -# Specifies the allowed origins for cross-origin requests to the console API, -# e.g. https://cloud.dify.ai or * for all origins. CONSOLE_CORS_ALLOW_ORIGINS=* -# When the frontend and backend run on different subdomains, set COOKIE_DOMAIN to the site's top-level domain (e.g., `example.com`). Leading dots are optional. COOKIE_DOMAIN= -# When the frontend and backend run on different subdomains, set NEXT_PUBLIC_COOKIE_DOMAIN=1. NEXT_PUBLIC_COOKIE_DOMAIN= -# WebSocket server URL. -NEXT_PUBLIC_SOCKET_URL=ws://localhost NEXT_PUBLIC_BATCH_CONCURRENCY=5 +API_SENTRY_DSN= +API_SENTRY_TRACES_SAMPLE_RATE=1.0 +API_SENTRY_PROFILES_SAMPLE_RATE=1.0 +WEB_SENTRY_DSN= +AMPLITUDE_API_KEY= +TEXT_GENERATION_TIMEOUT_MS=60000 +CSP_WHITELIST= +ALLOW_EMBED=false +ALLOW_INLINE_STYLES=false +ALLOW_UNSAFE_DATA_SCHEME=false +TOP_K_MAX_VALUE=10 +INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 +LOOP_NODE_MAX_COUNT=100 +MAX_TOOLS_NUM=10 +MAX_PARALLEL_LIMIT=10 +MAX_ITERATIONS_NUM=99 +MAX_TREE_DEPTH=50 +ENABLE_WEBSITE_JINAREADER=true +ENABLE_WEBSITE_FIRECRAWL=true +ENABLE_WEBSITE_WATERCRAWL=true +NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false +EXPERIMENTAL_ENABLE_VINEXT=false -# ------------------------------ -# File Storage Configuration -# ------------------------------ - -# The type of storage to use for storing user files. +# Storage and default vector store STORAGE_TYPE=opendal - -# Apache OpenDAL Configuration -# The configuration for OpenDAL consists of the following format: OPENDAL__. -# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services. -# Dify will scan configurations starting with OPENDAL_ and automatically apply them. -# The scheme name for the OpenDAL storage. OPENDAL_SCHEME=fs -# Configurations for OpenDAL Local File System. 
OPENDAL_FS_ROOT=storage - -# ClickZetta Volume Configuration (for storage backend) -# To use ClickZetta Volume as storage backend, set STORAGE_TYPE=clickzetta-volume -# Note: ClickZetta Volume will reuse the existing CLICKZETTA_* connection parameters - -# Volume type selection (three types available): -# - user: Personal/small team use, simple config, user-level permissions -# - table: Enterprise multi-tenant, smart routing, table-level + user-level permissions -# - external: Data lake integration, external storage connection, volume-level + storage-level permissions -CLICKZETTA_VOLUME_TYPE=user - -# External Volume name (required only when TYPE=external) -CLICKZETTA_VOLUME_NAME= - -# Table Volume table prefix (used only when TYPE=table) -CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_ - -# Dify file directory prefix (isolates from other apps, recommended to keep default) -CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km - -# S3 Configuration -# -S3_ENDPOINT= -S3_REGION=us-east-1 -S3_BUCKET_NAME=difyai -S3_ACCESS_KEY= -S3_SECRET_KEY= -S3_ADDRESS_STYLE=auto -# Whether to use AWS managed IAM roles for authenticating with the S3 service. -# If set to false, the access key and secret key must be provided. 
-S3_USE_AWS_MANAGED_IAM=false - -# Workflow run and Conversation archive storage (S3-compatible) -ARCHIVE_STORAGE_ENABLED=false -ARCHIVE_STORAGE_ENDPOINT= -ARCHIVE_STORAGE_ARCHIVE_BUCKET= -ARCHIVE_STORAGE_EXPORT_BUCKET= -ARCHIVE_STORAGE_ACCESS_KEY= -ARCHIVE_STORAGE_SECRET_KEY= -ARCHIVE_STORAGE_REGION=auto - -# Azure Blob Configuration -# -AZURE_BLOB_ACCOUNT_NAME=difyai -AZURE_BLOB_ACCOUNT_KEY=difyai -AZURE_BLOB_CONTAINER_NAME=difyai-container -AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net - -# Google Storage Configuration -# -GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name -GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= - -# The Alibaba Cloud OSS configurations, -# -ALIYUN_OSS_BUCKET_NAME=your-bucket-name -ALIYUN_OSS_ACCESS_KEY=your-access-key -ALIYUN_OSS_SECRET_KEY=your-secret-key -ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com -ALIYUN_OSS_REGION=ap-southeast-1 -ALIYUN_OSS_AUTH_VERSION=v4 -# Don't start with '/'. OSS doesn't support leading slash in object names. -ALIYUN_OSS_PATH=your-path -# Optional CloudBox ID for Aliyun OSS, DO NOT enable it if you are not using CloudBox. 
-#ALIYUN_CLOUDBOX_ID=your-cloudbox-id - -# Tencent COS Configuration -# -TENCENT_COS_BUCKET_NAME=your-bucket-name -TENCENT_COS_SECRET_KEY=your-secret-key -TENCENT_COS_SECRET_ID=your-secret-id -TENCENT_COS_REGION=your-region -TENCENT_COS_SCHEME=your-scheme -TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain - -# Oracle Storage Configuration -# -OCI_ENDPOINT=https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com -OCI_BUCKET_NAME=your-bucket-name -OCI_ACCESS_KEY=your-access-key -OCI_SECRET_KEY=your-secret-key -OCI_REGION=us-ashburn-1 - -# Huawei OBS Configuration -# -HUAWEI_OBS_BUCKET_NAME=your-bucket-name -HUAWEI_OBS_SECRET_KEY=your-secret-key -HUAWEI_OBS_ACCESS_KEY=your-access-key -HUAWEI_OBS_SERVER=your-server-url -HUAWEI_OBS_PATH_STYLE=false - -# Volcengine TOS Configuration -# -VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name -VOLCENGINE_TOS_SECRET_KEY=your-secret-key -VOLCENGINE_TOS_ACCESS_KEY=your-access-key -VOLCENGINE_TOS_ENDPOINT=your-server-url -VOLCENGINE_TOS_REGION=your-region - -# Baidu OBS Storage Configuration -# -BAIDU_OBS_BUCKET_NAME=your-bucket-name -BAIDU_OBS_SECRET_KEY=your-secret-key -BAIDU_OBS_ACCESS_KEY=your-access-key -BAIDU_OBS_ENDPOINT=your-server-url - -# Supabase Storage Configuration -# -SUPABASE_BUCKET_NAME=your-bucket-name -SUPABASE_API_KEY=your-access-key -SUPABASE_URL=your-server-url - -# ------------------------------ -# Vector Database Configuration -# ------------------------------ - -# The type of vector store to use. -# Supported values are `weaviate`, `oceanbase`, `seekdb`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`, `vastbase`, `tidb`, `tidb_on_qdrant`, `baidu`, `lindorm`, `huawei_cloud`, `upstash`, `matrixone`, `clickzetta`, `alibabacloud_mysql`, `iris`, `hologres`. 
VECTOR_STORE=weaviate -# Prefix used to create collection name in vector database VECTOR_INDEX_NAME_PREFIX=Vector_index - -# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`. WEAVIATE_ENDPOINT=http://weaviate:8080 WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih WEAVIATE_GRPC_ENDPOINT=grpc://weaviate:50051 WEAVIATE_TOKENIZATION=word - -# For OceanBase metadata database configuration, available when `DB_TYPE` is `oceanbase`. -# For OceanBase vector database configuration, available when `VECTOR_STORE` is `oceanbase` -# If you want to use OceanBase as both vector database and metadata database, you need to set both `DB_TYPE` and `VECTOR_STORE` to `oceanbase`, and set Database Configuration is the same as the vector database. -# seekdb is the lite version of OceanBase and shares the connection configuration with OceanBase. -OCEANBASE_VECTOR_HOST=oceanbase -OCEANBASE_VECTOR_PORT=2881 -OCEANBASE_VECTOR_USER=root@test -OCEANBASE_VECTOR_PASSWORD=difyai123456 -OCEANBASE_VECTOR_DATABASE=test -OCEANBASE_CLUSTER_NAME=difyai -OCEANBASE_MEMORY_LIMIT=6G -OCEANBASE_ENABLE_HYBRID_SEARCH=false -# For OceanBase vector database, built-in fulltext parsers are `ngram`, `beng`, `space`, `ngram2`, `ik` -# For OceanBase vector database, external fulltext parsers (require plugin installation) are `japanese_ftparser`, `thai_ftparser` -OCEANBASE_FULLTEXT_PARSER=ik -SEEKDB_MEMORY_LIMIT=2G - -# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`. -QDRANT_URL=http://qdrant:6333 -QDRANT_API_KEY=difyai123456 -QDRANT_CLIENT_TIMEOUT=20 -QDRANT_GRPC_ENABLED=false -QDRANT_GRPC_PORT=6334 -QDRANT_REPLICATION_FACTOR=1 - -# Milvus configuration. Only available when VECTOR_STORE is `milvus`. -# The milvus uri. 
-MILVUS_URI=http://host.docker.internal:19530 -MILVUS_DATABASE= -MILVUS_TOKEN= -MILVUS_USER= -MILVUS_PASSWORD= -MILVUS_ENABLE_HYBRID_SEARCH=False -MILVUS_ANALYZER_PARAMS= - -# MyScale configuration, only available when VECTOR_STORE is `myscale` -# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to: -# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters -MYSCALE_HOST=myscale -MYSCALE_PORT=8123 -MYSCALE_USER=default -MYSCALE_PASSWORD= -MYSCALE_DATABASE=dify -MYSCALE_FTS_PARAMS= - -# Couchbase configurations, only available when VECTOR_STORE is `couchbase` -# The connection string must include hostname defined in the docker-compose file (couchbase-server in this case) -COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server -COUCHBASE_USER=Administrator -COUCHBASE_PASSWORD=password -COUCHBASE_BUCKET_NAME=Embeddings -COUCHBASE_SCOPE_NAME=_default - -# Hologres configurations, only available when VECTOR_STORE is `hologres` -# access_key_id is used as the PG username, access_key_secret is used as the PG password -HOLOGRES_HOST= -HOLOGRES_PORT=80 -HOLOGRES_DATABASE= -HOLOGRES_ACCESS_KEY_ID= -HOLOGRES_ACCESS_KEY_SECRET= -HOLOGRES_SCHEMA=public -HOLOGRES_TOKENIZER=jieba -HOLOGRES_DISTANCE_METHOD=Cosine -HOLOGRES_BASE_QUANTIZATION_TYPE=rabitq -HOLOGRES_MAX_DEGREE=64 -HOLOGRES_EF_CONSTRUCTION=400 - -# pgvector configurations, only available when VECTOR_STORE is `pgvector` -PGVECTOR_HOST=pgvector -PGVECTOR_PORT=5432 -PGVECTOR_USER=postgres -PGVECTOR_PASSWORD=difyai123456 -PGVECTOR_DATABASE=dify -PGVECTOR_MIN_CONNECTION=1 -PGVECTOR_MAX_CONNECTION=5 -PGVECTOR_PG_BIGM=false -PGVECTOR_PG_BIGM_VERSION=1.2-20240606 - -# vastbase configurations, only available when VECTOR_STORE is `vastbase` -VASTBASE_HOST=vastbase -VASTBASE_PORT=5432 -VASTBASE_USER=dify -VASTBASE_PASSWORD=Difyai123456 -VASTBASE_DATABASE=dify -VASTBASE_MIN_CONNECTION=1 -VASTBASE_MAX_CONNECTION=5 - -# pgvecto-rs configurations, only available when VECTOR_STORE is 
`pgvecto-rs` -PGVECTO_RS_HOST=pgvecto-rs -PGVECTO_RS_PORT=5432 -PGVECTO_RS_USER=postgres -PGVECTO_RS_PASSWORD=difyai123456 -PGVECTO_RS_DATABASE=dify - -# analyticdb configurations, only available when VECTOR_STORE is `analyticdb` -ANALYTICDB_KEY_ID=your-ak -ANALYTICDB_KEY_SECRET=your-sk -ANALYTICDB_REGION_ID=cn-hangzhou -ANALYTICDB_INSTANCE_ID=gp-ab123456 -ANALYTICDB_ACCOUNT=testaccount -ANALYTICDB_PASSWORD=testpassword -ANALYTICDB_NAMESPACE=dify -ANALYTICDB_NAMESPACE_PASSWORD=difypassword -ANALYTICDB_HOST=gp-test.aliyuncs.com -ANALYTICDB_PORT=5432 -ANALYTICDB_MIN_CONNECTION=1 -ANALYTICDB_MAX_CONNECTION=5 - -# TiDB vector configurations, only available when VECTOR_STORE is `tidb_vector` -TIDB_VECTOR_HOST=tidb -TIDB_VECTOR_PORT=4000 -TIDB_VECTOR_USER= -TIDB_VECTOR_PASSWORD= -TIDB_VECTOR_DATABASE=dify - -# Matrixone vector configurations. -MATRIXONE_HOST=matrixone -MATRIXONE_PORT=6001 -MATRIXONE_USER=dump -MATRIXONE_PASSWORD=111 -MATRIXONE_DATABASE=dify - -# Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant` -TIDB_ON_QDRANT_URL=http://127.0.0.1 -TIDB_ON_QDRANT_API_KEY=dify -TIDB_ON_QDRANT_CLIENT_TIMEOUT=20 -TIDB_ON_QDRANT_GRPC_ENABLED=false -TIDB_ON_QDRANT_GRPC_PORT=6334 -TIDB_PUBLIC_KEY=dify -TIDB_PRIVATE_KEY=dify -TIDB_API_URL=http://127.0.0.1 -TIDB_IAM_API_URL=http://127.0.0.1 -TIDB_REGION=regions/aws-us-east-1 -TIDB_PROJECT_ID=dify -TIDB_SPEND_LIMIT=100 - -# Chroma configuration, only available when VECTOR_STORE is `chroma` -CHROMA_HOST=127.0.0.1 -CHROMA_PORT=8000 -CHROMA_TENANT=default_tenant -CHROMA_DATABASE=default_database -CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider -CHROMA_AUTH_CREDENTIALS= - -# Oracle configuration, only available when VECTOR_STORE is `oracle` -ORACLE_USER=dify -ORACLE_PASSWORD=dify -ORACLE_DSN=oracle:1521/FREEPDB1 -ORACLE_CONFIG_DIR=/app/api/storage/wallet -ORACLE_WALLET_LOCATION=/app/api/storage/wallet -ORACLE_WALLET_PASSWORD=dify -ORACLE_IS_AUTONOMOUS=false - -# AlibabaCloud 
MySQL configuration, only available when VECTOR_STORE is `alibabcloud_mysql` -ALIBABACLOUD_MYSQL_HOST=127.0.0.1 -ALIBABACLOUD_MYSQL_PORT=3306 -ALIBABACLOUD_MYSQL_USER=root -ALIBABACLOUD_MYSQL_PASSWORD=difyai123456 -ALIBABACLOUD_MYSQL_DATABASE=dify -ALIBABACLOUD_MYSQL_MAX_CONNECTION=5 -ALIBABACLOUD_MYSQL_HNSW_M=6 - -# relyt configurations, only available when VECTOR_STORE is `relyt` -RELYT_HOST=db -RELYT_PORT=5432 -RELYT_USER=postgres -RELYT_PASSWORD=difyai123456 -RELYT_DATABASE=postgres - -# open search configuration, only available when VECTOR_STORE is `opensearch` -OPENSEARCH_HOST=opensearch -OPENSEARCH_PORT=9200 -OPENSEARCH_SECURE=true -OPENSEARCH_VERIFY_CERTS=true -OPENSEARCH_AUTH_METHOD=basic -OPENSEARCH_USER=admin -OPENSEARCH_PASSWORD=admin -# If using AWS managed IAM, e.g. Managed Cluster or OpenSearch Serverless -OPENSEARCH_AWS_REGION=ap-southeast-1 -OPENSEARCH_AWS_SERVICE=aoss - -# tencent vector configurations, only available when VECTOR_STORE is `tencent` -TENCENT_VECTOR_DB_URL=http://127.0.0.1 -TENCENT_VECTOR_DB_API_KEY=dify -TENCENT_VECTOR_DB_TIMEOUT=30 -TENCENT_VECTOR_DB_USERNAME=dify -TENCENT_VECTOR_DB_DATABASE=dify -TENCENT_VECTOR_DB_SHARD=1 -TENCENT_VECTOR_DB_REPLICAS=2 -TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false - -# ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch` -ELASTICSEARCH_HOST=0.0.0.0 -ELASTICSEARCH_PORT=9200 -ELASTICSEARCH_USERNAME=elastic -ELASTICSEARCH_PASSWORD=elastic -KIBANA_PORT=5601 - -# Using ElasticSearch Cloud Serverless, or not. 
-ELASTICSEARCH_USE_CLOUD=false -ELASTICSEARCH_CLOUD_URL=YOUR-ELASTICSEARCH_CLOUD_URL -ELASTICSEARCH_API_KEY=YOUR-ELASTICSEARCH_API_KEY - -ELASTICSEARCH_VERIFY_CERTS=False -ELASTICSEARCH_CA_CERTS= -ELASTICSEARCH_REQUEST_TIMEOUT=100000 -ELASTICSEARCH_RETRY_ON_TIMEOUT=True -ELASTICSEARCH_MAX_RETRIES=10 - -# baidu vector configurations, only available when VECTOR_STORE is `baidu` -BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 -BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 -BAIDU_VECTOR_DB_ACCOUNT=root -BAIDU_VECTOR_DB_API_KEY=dify -BAIDU_VECTOR_DB_DATABASE=dify -BAIDU_VECTOR_DB_SHARD=1 -BAIDU_VECTOR_DB_REPLICAS=3 -BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER -BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE -BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT=500 -BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO=0.05 -BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS=300 - -# VikingDB configurations, only available when VECTOR_STORE is `vikingdb` -VIKINGDB_ACCESS_KEY=your-ak -VIKINGDB_SECRET_KEY=your-sk -VIKINGDB_REGION=cn-shanghai -VIKINGDB_HOST=api-vikingdb.xxx.volces.com -VIKINGDB_SCHEMA=http -VIKINGDB_CONNECTION_TIMEOUT=30 -VIKINGDB_SOCKET_TIMEOUT=30 - -# Lindorm configuration, only available when VECTOR_STORE is `lindorm` -LINDORM_URL=http://localhost:30070 -LINDORM_USERNAME=admin -LINDORM_PASSWORD=admin -LINDORM_USING_UGC=True -LINDORM_QUERY_TIMEOUT=1 - -# opengauss configurations, only available when VECTOR_STORE is `opengauss` -OPENGAUSS_HOST=opengauss -OPENGAUSS_PORT=6600 -OPENGAUSS_USER=postgres -OPENGAUSS_PASSWORD=Dify@123 -OPENGAUSS_DATABASE=dify -OPENGAUSS_MIN_CONNECTION=1 -OPENGAUSS_MAX_CONNECTION=5 -OPENGAUSS_ENABLE_PQ=false - -# huawei cloud search service vector configurations, only available when VECTOR_STORE is `huawei_cloud` -HUAWEI_CLOUD_HOSTS=https://127.0.0.1:9200 -HUAWEI_CLOUD_USER=admin -HUAWEI_CLOUD_PASSWORD=admin - -# Upstash Vector configuration, only available when VECTOR_STORE is `upstash` 
-UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io -UPSTASH_VECTOR_TOKEN=dify - -# TableStore Vector configuration -# (only used when VECTOR_STORE is tablestore) -TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com -TABLESTORE_INSTANCE_NAME=instance-name -TABLESTORE_ACCESS_KEY_ID=xxx -TABLESTORE_ACCESS_KEY_SECRET=xxx -TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false - -# Clickzetta configuration, only available when VECTOR_STORE is `clickzetta` -CLICKZETTA_USERNAME= -CLICKZETTA_PASSWORD= -CLICKZETTA_INSTANCE= -CLICKZETTA_SERVICE=api.clickzetta.com -CLICKZETTA_WORKSPACE=quick_start -CLICKZETTA_VCLUSTER=default_ap -CLICKZETTA_SCHEMA=dify -CLICKZETTA_BATCH_SIZE=100 -CLICKZETTA_ENABLE_INVERTED_INDEX=true -CLICKZETTA_ANALYZER_TYPE=chinese -CLICKZETTA_ANALYZER_MODE=smart -CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance - -# InterSystems IRIS configuration, only available when VECTOR_STORE is `iris` -IRIS_HOST=iris -IRIS_SUPER_SERVER_PORT=1972 -IRIS_WEB_SERVER_PORT=52773 -IRIS_USER=_SYSTEM -IRIS_PASSWORD=Dify@1234 -IRIS_DATABASE=USER -IRIS_SCHEMA=dify -IRIS_CONNECTION_URL= -IRIS_MIN_CONNECTION=1 -IRIS_MAX_CONNECTION=3 -IRIS_TEXT_INDEX=true -IRIS_TEXT_INDEX_LANGUAGE=en -IRIS_TIMEZONE=UTC - -# ------------------------------ -# Knowledge Configuration -# ------------------------------ - -# Upload file size limit, default 15M. -UPLOAD_FILE_SIZE_LIMIT=15 - -# The maximum number of files that can be uploaded at a time, default 5. -UPLOAD_FILE_BATCH_LIMIT=5 - -# Comma-separated list of file extensions blocked from upload for security reasons. -# Extensions should be lowercase without dots (e.g., exe,bat,sh,dll). -# Empty by default to allow all file types. -# Recommended: exe,bat,cmd,com,scr,vbs,ps1,msi,dll -UPLOAD_FILE_EXTENSION_BLACKLIST= - -# Maximum number of files allowed in a single chunk attachment, default 10. 
-SINGLE_CHUNK_ATTACHMENT_LIMIT=10 - -# Maximum number of files allowed in a image batch upload operation -IMAGE_FILE_BATCH_LIMIT=10 - -# Maximum allowed image file size for attachments in megabytes, default 2. -ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2 - -# Timeout for downloading image attachments in seconds, default 60. -ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60 - - -# ETL type, support: `dify`, `Unstructured` -# `dify` Dify's proprietary file extraction scheme -# `Unstructured` Unstructured.io file extraction scheme -ETL_TYPE=dify - -# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured -# Or using Unstructured for document extractor node for pptx. -# For example: http://unstructured:8000/general/v0/general -UNSTRUCTURED_API_URL= -UNSTRUCTURED_API_KEY= -SCARF_NO_ANALYTICS=true - -# ------------------------------ -# Model Configuration -# ------------------------------ - -# The maximum number of tokens allowed for prompt generation. -# This setting controls the upper limit of tokens that can be used by the LLM -# when generating a prompt in the prompt generation tool. -# Default: 512 tokens. -PROMPT_GENERATION_MAX_TOKENS=512 - -# The maximum number of tokens allowed for code generation. -# This setting controls the upper limit of tokens that can be used by the LLM -# when generating code in the code generation tool. -# Default: 1024 tokens. -CODE_GENERATION_MAX_TOKENS=1024 - -# Enable or disable plugin based token counting. If disabled, token counting will return 0. -# This can improve performance by skipping token counting operations. -# Default: false (disabled). -PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false - -# ------------------------------ -# Multi-modal Configuration -# ------------------------------ - -# The format of the image/video/audio/document sent when the multi-modal model is input, -# the default is base64, optional url. -# The delay of the call in url mode will be lower than that in base64 mode. 
-# It is generally recommended to use the more compatible base64 mode. -# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document. -MULTIMODAL_SEND_FORMAT=base64 -# Upload image file size limit, default 10M. -UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 -# Upload video file size limit, default 100M. -UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 -# Upload audio file size limit, default 50M. -UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 - -# ------------------------------ -# Sentry Configuration -# Used for application monitoring and error log tracking. -# ------------------------------ -SENTRY_DSN= - -# API Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -API_SENTRY_DSN= -# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%. -API_SENTRY_TRACES_SAMPLE_RATE=1.0 -# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%. -API_SENTRY_PROFILES_SAMPLE_RATE=1.0 - -# Web Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -WEB_SENTRY_DSN= - -# Plugin_daemon Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -PLUGIN_SENTRY_ENABLED=false -PLUGIN_SENTRY_DSN= - -# ------------------------------ -# Notion Integration Configuration -# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations -# ------------------------------ - -# Configure as "public" or "internal". -# Since Notion's OAuth redirect URL only supports HTTPS, -# if deploying locally, please use Notion's internal integration. 
-NOTION_INTEGRATION_TYPE=public -# Notion OAuth client secret (used for public integration type) -NOTION_CLIENT_SECRET= -# Notion OAuth client id (used for public integration type) -NOTION_CLIENT_ID= -# Notion internal integration secret. -# If the value of NOTION_INTEGRATION_TYPE is "internal", -# you need to configure this variable. -NOTION_INTERNAL_SECRET= - -# ------------------------------ -# Mail related configuration -# ------------------------------ - -# Mail type, support: resend, smtp, sendgrid -MAIL_TYPE= - -# Default send from email address, if not specified -# If using SendGrid, use the 'from' field for authentication if necessary. -MAIL_DEFAULT_SEND_FROM= - -# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`. -RESEND_API_URL=https://api.resend.com -RESEND_API_KEY= - - -# SMTP server configuration, used when MAIL_TYPE is `smtp` -SMTP_SERVER= -SMTP_PORT=465 -SMTP_USERNAME= -SMTP_PASSWORD= -SMTP_USE_TLS=true -SMTP_OPPORTUNISTIC_TLS=false -# Optional: override the local hostname used for SMTP HELO/EHLO -SMTP_LOCAL_HOSTNAME= - -# Sendgid configuration -SENDGRID_API_KEY= - -# ------------------------------ -# Others Configuration -# ------------------------------ - -# Maximum length of segmentation tokens for indexing -INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 - -# Member invitation link valid time (hours), -# Default: 72. -INVITE_EXPIRY_HOURS=72 - -# Reset password token valid time (minutes), -RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 -EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5 -CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5 -OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5 - -# The sandbox service endpoint. 
-CODE_EXECUTION_ENDPOINT=http://sandbox:8194 -CODE_EXECUTION_API_KEY=dify-sandbox -CODE_EXECUTION_SSL_VERIFY=True -CODE_EXECUTION_POOL_MAX_CONNECTIONS=100 -CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20 -CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0 -CODE_MAX_NUMBER=9223372036854775807 -CODE_MIN_NUMBER=-9223372036854775808 -CODE_MAX_DEPTH=5 -CODE_MAX_PRECISION=20 -CODE_MAX_STRING_LENGTH=400000 -CODE_MAX_STRING_ARRAY_LENGTH=30 -CODE_MAX_OBJECT_ARRAY_LENGTH=30 -CODE_MAX_NUMBER_ARRAY_LENGTH=1000 -CODE_EXECUTION_CONNECT_TIMEOUT=10 -CODE_EXECUTION_READ_TIMEOUT=60 -CODE_EXECUTION_WRITE_TIMEOUT=10 -TEMPLATE_TRANSFORM_MAX_LENGTH=400000 - -# Workflow runtime configuration -WORKFLOW_MAX_EXECUTION_STEPS=500 -WORKFLOW_MAX_EXECUTION_TIME=1200 -WORKFLOW_CALL_MAX_DEPTH=5 -MAX_VARIABLE_SIZE=204800 -WORKFLOW_FILE_UPLOAD_LIMIT=10 - -# GraphEngine Worker Pool Configuration -# Minimum number of workers per GraphEngine instance (default: 1) -GRAPH_ENGINE_MIN_WORKERS=1 -# Maximum number of workers per GraphEngine instance (default: 10) -GRAPH_ENGINE_MAX_WORKERS=10 -# Queue depth threshold that triggers worker scale up (default: 3) -GRAPH_ENGINE_SCALE_UP_THRESHOLD=3 -# Seconds of idle time before scaling down workers (default: 5.0) -GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0 - -# Workflow storage configuration -# Options: rdbms, hybrid -# rdbms: Use only the relational database (default) -# hybrid: Save new data to object storage, read from both object storage and RDBMS -WORKFLOW_NODE_EXECUTION_STORAGE=rdbms - -# Repository configuration -# Core workflow execution repository implementation -# Options: -# - core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository (default) -# - core.repositories.celery_workflow_execution_repository.CeleryWorkflowExecutionRepository -# - extensions.logstore.repositories.logstore_workflow_execution_repository.LogstoreWorkflowExecutionRepository 
-CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository - -# Core workflow node execution repository implementation -# Options: -# - core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository (default) -# - core.repositories.celery_workflow_node_execution_repository.CeleryWorkflowNodeExecutionRepository -# - extensions.logstore.repositories.logstore_workflow_node_execution_repository.LogstoreWorkflowNodeExecutionRepository -CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository - -# API workflow run repository implementation -# Options: -# - repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository (default) -# - extensions.logstore.repositories.logstore_api_workflow_run_repository.LogstoreAPIWorkflowRunRepository -API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository - -# API workflow node execution repository implementation -# Options: -# - repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository (default) -# - extensions.logstore.repositories.logstore_api_workflow_node_execution_repository.LogstoreAPIWorkflowNodeExecutionRepository -API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository - -# Workflow log cleanup configuration -# Enable automatic cleanup of workflow run logs to manage database size -WORKFLOW_LOG_CLEANUP_ENABLED=false -# Number of days to retain workflow run logs (default: 30 days) -WORKFLOW_LOG_RETENTION_DAYS=30 -# Batch size for workflow log cleanup operations (default: 100) -WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100 -# Comma-separated list of workflow IDs to clean logs for 
-WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS= - -# Aliyun SLS Logstore Configuration -# Aliyun Access Key ID -ALIYUN_SLS_ACCESS_KEY_ID= -# Aliyun Access Key Secret -ALIYUN_SLS_ACCESS_KEY_SECRET= -# Aliyun SLS Endpoint (e.g., cn-hangzhou.log.aliyuncs.com) -ALIYUN_SLS_ENDPOINT= -# Aliyun SLS Region (e.g., cn-hangzhou) -ALIYUN_SLS_REGION= -# Aliyun SLS Project Name -ALIYUN_SLS_PROJECT_NAME= -# Number of days to retain workflow run logs (default: 365 days, 3650 for permanent storage) -ALIYUN_SLS_LOGSTORE_TTL=365 -# Enable dual-write to both SLS LogStore and SQL database (default: false) -LOGSTORE_DUAL_WRITE_ENABLED=false -# Enable dual-read fallback to SQL database when LogStore returns no results (default: true) -# Useful for migration scenarios where historical data exists only in SQL database -LOGSTORE_DUAL_READ_ENABLED=true -# Control flag for whether to write the `graph` field to LogStore. -# If LOGSTORE_ENABLE_PUT_GRAPH_FIELD is "true", write the full `graph` field; -# otherwise write an empty {} instead. Defaults to writing the `graph` field. -LOGSTORE_ENABLE_PUT_GRAPH_FIELD=true - -# HTTP request node in workflow configuration -HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 -HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 -HTTP_REQUEST_NODE_SSL_VERIFY=True - -# HTTP request node timeout configuration -# Maximum timeout values (in seconds) that users can set in HTTP request nodes -# - Connect timeout: Time to wait for establishing connection (default: 10s) -# - Read timeout: Time to wait for receiving response data (default: 600s, 10 minutes) -# - Write timeout: Time to wait for sending request data (default: 600s, 10 minutes) -HTTP_REQUEST_MAX_CONNECT_TIMEOUT=10 -HTTP_REQUEST_MAX_READ_TIMEOUT=600 -HTTP_REQUEST_MAX_WRITE_TIMEOUT=600 - -# Base64 encoded CA certificate data for custom certificate verification (PEM format, optional) -# HTTP_REQUEST_NODE_SSL_CERT_DATA=LS0tLS1CRUdJTi... 
-# Base64 encoded client certificate data for mutual TLS authentication (PEM format, optional) -# HTTP_REQUEST_NODE_SSL_CLIENT_CERT_DATA=LS0tLS1CRUdJTi... -# Base64 encoded client private key data for mutual TLS authentication (PEM format, optional) -# HTTP_REQUEST_NODE_SSL_CLIENT_KEY_DATA=LS0tLS1CRUdJTi... - -# Webhook request configuration -WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760 - -# Respect X-* headers to redirect clients -RESPECT_XFORWARD_HEADERS_ENABLED=false - -# SSRF Proxy server HTTP URL -SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 -# SSRF Proxy server HTTPS URL -SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 - -# Maximum loop count in the workflow -LOOP_NODE_MAX_COUNT=100 - -# The maximum number of tools that can be used in the agent. -MAX_TOOLS_NUM=10 - -# Maximum number of Parallelism branches in the workflow -MAX_PARALLEL_LIMIT=10 - -# The maximum number of iterations for agent setting -MAX_ITERATIONS_NUM=99 - -# ------------------------------ -# Environment Variables for web Service -# ------------------------------ - -# The timeout for the text generation in millisecond -TEXT_GENERATION_TIMEOUT_MS=60000 - -# Enable the experimental vinext runtime shipped in the image. -EXPERIMENTAL_ENABLE_VINEXT=false - -# Allow inline style attributes in Markdown rendering. -# Enable this if your workflows use Jinja2 templates with styled HTML. -# Only recommended for self-hosted deployments with trusted content. -ALLOW_INLINE_STYLES=false - -# Allow rendering unsafe URLs which have "data:" scheme. 
-ALLOW_UNSAFE_DATA_SCHEME=false - -# Maximum number of tree depth in the workflow -MAX_TREE_DEPTH=50 - -# ------------------------------ -# Environment Variables for database Service -# ------------------------------ -# Postgres data directory -PGDATA=/var/lib/postgresql/data/pgdata - -# MySQL Default Configuration -MYSQL_HOST_VOLUME=./volumes/mysql/data - -# ------------------------------ -# Environment Variables for sandbox Service -# ------------------------------ - -# The API key for the sandbox service -SANDBOX_API_KEY=dify-sandbox -# The mode in which the Gin framework runs -SANDBOX_GIN_MODE=release -# The timeout for the worker in seconds -SANDBOX_WORKER_TIMEOUT=15 -# Enable network for the sandbox service -SANDBOX_ENABLE_NETWORK=true -# HTTP proxy URL for SSRF protection -SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 -# HTTPS proxy URL for SSRF protection -SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 -# The port on which the sandbox service runs -SANDBOX_PORT=8194 - -# ------------------------------ -# Environment Variables for weaviate Service -# (only used when VECTOR_STORE is weaviate) -# ------------------------------ WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate WEAVIATE_QUERY_DEFAULTS_LIMIT=25 WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true @@ -1259,118 +167,26 @@ WEAVIATE_ENABLE_TOKENIZER_GSE=false WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA=false WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR=false -# ------------------------------ -# Environment Variables for Chroma -# (only used when VECTOR_STORE is chroma) -# ------------------------------ - -# Authentication credentials for Chroma server -CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456 -# Authentication provider for Chroma server -CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider -# Persistence setting for Chroma server -CHROMA_IS_PERSISTENT=TRUE - -# ------------------------------ -# Environment Variables for Oracle Service -# (only used when VECTOR_STORE is oracle) -# 
------------------------------ -ORACLE_PWD=Dify123456 -ORACLE_CHARACTERSET=AL32UTF8 - -# ------------------------------ -# Environment Variables for milvus Service -# (only used when VECTOR_STORE is milvus) -# ------------------------------ -# ETCD configuration for auto compaction mode -ETCD_AUTO_COMPACTION_MODE=revision -# ETCD configuration for auto compaction retention in terms of number of revisions -ETCD_AUTO_COMPACTION_RETENTION=1000 -# ETCD configuration for backend quota in bytes -ETCD_QUOTA_BACKEND_BYTES=4294967296 -# ETCD configuration for the number of changes before triggering a snapshot -ETCD_SNAPSHOT_COUNT=50000 -# MinIO access key for authentication -MINIO_ACCESS_KEY=minioadmin -# MinIO secret key for authentication -MINIO_SECRET_KEY=minioadmin -# ETCD service endpoints -ETCD_ENDPOINTS=etcd:2379 -# MinIO service address -MINIO_ADDRESS=minio:9000 -# Enable or disable security authorization -MILVUS_AUTHORIZATION_ENABLED=true - -# ------------------------------ -# Environment Variables for pgvector / pgvector-rs Service -# (only used when VECTOR_STORE is pgvector / pgvector-rs) -# ------------------------------ -PGVECTOR_PGUSER=postgres -# The password for the default postgres user. -PGVECTOR_POSTGRES_PASSWORD=difyai123456 -# The name of the default postgres database. 
-PGVECTOR_POSTGRES_DB=dify -# postgres data directory -PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata - -# ------------------------------ -# Environment Variables for opensearch -# (only used when VECTOR_STORE is opensearch) -# ------------------------------ -OPENSEARCH_DISCOVERY_TYPE=single-node -OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true -OPENSEARCH_JAVA_OPTS_MIN=512m -OPENSEARCH_JAVA_OPTS_MAX=1024m -OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 -OPENSEARCH_MEMLOCK_SOFT=-1 -OPENSEARCH_MEMLOCK_HARD=-1 -OPENSEARCH_NOFILE_SOFT=65536 -OPENSEARCH_NOFILE_HARD=65536 - -# ------------------------------ -# Environment Variables for Nginx reverse proxy -# ------------------------------ -NGINX_SERVER_NAME=_ -NGINX_HTTPS_ENABLED=false -# HTTP port -NGINX_PORT=80 -# SSL settings are only applied when HTTPS_ENABLED is true -NGINX_SSL_PORT=443 -# if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory -# and modify the env vars below accordingly. 
-NGINX_SSL_CERT_FILENAME=dify.crt -NGINX_SSL_CERT_KEY_FILENAME=dify.key -NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3 - -# Nginx performance tuning -NGINX_WORKER_PROCESSES=auto -NGINX_CLIENT_MAX_BODY_SIZE=100M -NGINX_KEEPALIVE_TIMEOUT=65 - -# Proxy settings -NGINX_PROXY_READ_TIMEOUT=3600s -NGINX_PROXY_SEND_TIMEOUT=3600s - -# Set true to accept requests for /.well-known/acme-challenge/ -NGINX_ENABLE_CERTBOT_CHALLENGE=false - -# ------------------------------ -# Certbot Configuration -# ------------------------------ - -# Email address (required to get certificates from Let's Encrypt) -CERTBOT_EMAIL= - -# Domain name -CERTBOT_DOMAIN= - -# certbot command options -# i.e: --force-renewal --dry-run --test-cert --debug -CERTBOT_OPTIONS= - -# ------------------------------ -# Environment Variables for SSRF Proxy -# ------------------------------ +# Sandbox and SSRF proxy +CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox +CODE_EXECUTION_SSL_VERIFY=True +CODE_EXECUTION_POOL_MAX_CONNECTIONS=100 +CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0 +CODE_EXECUTION_CONNECT_TIMEOUT=10 +CODE_EXECUTION_READ_TIMEOUT=60 +CODE_EXECUTION_WRITE_TIMEOUT=10 +SANDBOX_API_KEY=dify-sandbox +SANDBOX_GIN_MODE=release +SANDBOX_WORKER_TIMEOUT=15 +SANDBOX_ENABLE_NETWORK=true +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +SANDBOX_PORT=8194 +PIP_MIRROR_URL= +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 SSRF_HTTP_PORT=3128 SSRF_COREDUMP_DIR=/var/spool/squid SSRF_REVERSE_PROXY_PORT=8194 @@ -1383,67 +199,7 @@ SSRF_POOL_MAX_CONNECTIONS=100 SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20 SSRF_POOL_KEEPALIVE_EXPIRY=5.0 -# ------------------------------ -# docker env var for specifying vector db and metadata db type at startup -# (based on the vector db and metadata db type, the corresponding docker -# compose profile will be used) -# if you want to use unstructured, 
add ',unstructured' to the end -# ------------------------------ -COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql} - -# ------------------------------ -# Worker health check configuration for worker and worker_beat services. -# Set to false to enable the health check. -# Note: enabling the health check may cause periodic CPU spikes and increased load, -# as it establishes a broker connection and sends a Celery ping on every check interval. -# ------------------------------ -COMPOSE_WORKER_HEALTHCHECK_DISABLED=true -# Interval between health checks (e.g. 30s, 1m) -COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s -# Timeout for each health check (e.g. 30s, 1m) -COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s - -# ------------------------------ -# Docker Compose Service Expose Host Port Configurations -# ------------------------------ -EXPOSE_NGINX_PORT=80 -EXPOSE_NGINX_SSL_PORT=443 - -# ---------------------------------------------------------------------------- -# ModelProvider & Tool Position Configuration -# Used to specify the model providers and tools that can be used in the app. -# ---------------------------------------------------------------------------- - -# Pin, include, and exclude tools -# Use comma-separated values with no spaces between items. -# Example: POSITION_TOOL_PINS=bing,google -POSITION_TOOL_PINS= -POSITION_TOOL_INCLUDES= -POSITION_TOOL_EXCLUDES= - -# Pin, include, and exclude model providers -# Use comma-separated values with no spaces between items. -# Example: POSITION_PROVIDER_PINS=openai,openllm -POSITION_PROVIDER_PINS= -POSITION_PROVIDER_INCLUDES= -POSITION_PROVIDER_EXCLUDES= - -# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP -CSP_WHITELIST= - -# Enable or disable create tidb service job -CREATE_TIDB_SERVICE_JOB_ENABLED=false - -# Maximum number of submitted thread count in a ThreadPool for parallel node execution -MAX_SUBMIT_COUNT=100 - -# The maximum number of top-k value for RAG. 
-TOP_K_MAX_VALUE=10 - -# ------------------------------ -# Plugin Daemon Configuration -# ------------------------------ - +# Plugin daemon DB_PLUGIN_DATABASE=dify_plugin EXPOSE_PLUGIN_DAEMON_PORT=5002 PLUGIN_DAEMON_PORT=5002 @@ -1452,180 +208,44 @@ PLUGIN_DAEMON_URL=http://plugin_daemon:5002 PLUGIN_MAX_PACKAGE_SIZE=52428800 PLUGIN_MODEL_SCHEMA_CACHE_TTL=3600 PLUGIN_PPROF_ENABLED=false - PLUGIN_DEBUGGING_HOST=0.0.0.0 PLUGIN_DEBUGGING_PORT=5003 EXPOSE_PLUGIN_DEBUGGING_HOST=localhost EXPOSE_PLUGIN_DEBUGGING_PORT=5003 - -# If this key is changed, DIFY_INNER_API_KEY in plugin_daemon service must also be updated or agent node will fail. PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 PLUGIN_DIFY_INNER_API_URL=http://api:5001 - -ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} - -MARKETPLACE_ENABLED=true -MARKETPLACE_API_URL=https://marketplace.dify.ai - -# Creators Platform configuration -CREATORS_PLATFORM_FEATURES_ENABLED=true -CREATORS_PLATFORM_API_URL=https://creators.dify.ai -CREATORS_PLATFORM_OAUTH_CLIENT_ID= - FORCE_VERIFYING_SIGNATURE=true -ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES=true - PLUGIN_STDIO_BUFFER_SIZE=1024 PLUGIN_STDIO_MAX_BUFFER_SIZE=5242880 - PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120 -# Plugin Daemon side timeout (configure to match the API side below) PLUGIN_MAX_EXECUTION_TIMEOUT=600 -# API side timeout (configure to match the Plugin Daemon side above) -PLUGIN_DAEMON_TIMEOUT=600.0 -# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple -PIP_MIRROR_URL= - -# https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example -# Plugin storage type, local aws_s3 tencent_cos azure_blob aliyun_oss volcengine_tos PLUGIN_STORAGE_TYPE=local PLUGIN_STORAGE_LOCAL_ROOT=/app/storage PLUGIN_WORKING_PATH=/app/storage/cwd PLUGIN_INSTALLED_PATH=plugin PLUGIN_PACKAGE_CACHE_PATH=plugin_packages PLUGIN_MEDIA_CACHE_PATH=assets -# Plugin oss bucket PLUGIN_STORAGE_OSS_BUCKET= -# Plugin oss s3 credentials -PLUGIN_S3_USE_AWS=false 
-PLUGIN_S3_USE_AWS_MANAGED_IAM=false -PLUGIN_S3_ENDPOINT= -PLUGIN_S3_USE_PATH_STYLE=false -PLUGIN_AWS_ACCESS_KEY= -PLUGIN_AWS_SECRET_KEY= -PLUGIN_AWS_REGION= -# Plugin oss azure blob -PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME= -PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING= -# Plugin oss tencent cos -PLUGIN_TENCENT_COS_SECRET_KEY= -PLUGIN_TENCENT_COS_SECRET_ID= -PLUGIN_TENCENT_COS_REGION= -# Plugin oss aliyun oss -PLUGIN_ALIYUN_OSS_REGION= -PLUGIN_ALIYUN_OSS_ENDPOINT= -PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID= -PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET= -PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4 -PLUGIN_ALIYUN_OSS_PATH= -# Plugin oss volcengine tos -PLUGIN_VOLCENGINE_TOS_ENDPOINT= -PLUGIN_VOLCENGINE_TOS_ACCESS_KEY= -PLUGIN_VOLCENGINE_TOS_SECRET_KEY= -PLUGIN_VOLCENGINE_TOS_REGION= +PLUGIN_SENTRY_ENABLED=false +PLUGIN_SENTRY_DSN= +MARKETPLACE_ENABLED=true +MARKETPLACE_API_URL=https://marketplace.dify.ai +MARKETPLACE_URL= -# ------------------------------ -# OTLP Collector Configuration -# ------------------------------ -ENABLE_OTEL=false -OTLP_TRACE_ENDPOINT= -OTLP_METRIC_ENDPOINT= -OTLP_BASE_ENDPOINT=http://localhost:4318 -OTLP_API_KEY= -OTEL_EXPORTER_OTLP_PROTOCOL= -OTEL_EXPORTER_TYPE=otlp -OTEL_SAMPLING_RATE=0.1 -OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000 -OTEL_MAX_QUEUE_SIZE=2048 -OTEL_MAX_EXPORT_BATCH_SIZE=512 -OTEL_METRIC_EXPORT_INTERVAL=60000 -OTEL_BATCH_EXPORT_TIMEOUT=10000 -OTEL_METRIC_EXPORT_TIMEOUT=30000 - -# Prevent Clickjacking -ALLOW_EMBED=false - -# Dataset queue monitor configuration -QUEUE_MONITOR_THRESHOLD=200 -# You can configure multiple ones, separated by commas. 
eg: test1@dify.ai,test2@dify.ai -QUEUE_MONITOR_ALERT_EMAILS= -# Monitor interval in minutes, default is 30 minutes -QUEUE_MONITOR_INTERVAL=30 - -# Swagger UI configuration -SWAGGER_UI_ENABLED=false -SWAGGER_UI_PATH=/swagger-ui.html - -# Whether to encrypt dataset IDs when exporting DSL files (default: true) -# Set to false to export dataset IDs as plain text for easier cross-environment import -DSL_EXPORT_ENCRYPT_DATASET_ID=true - -# Maximum number of segments for dataset segments API (0 for unlimited) -DATASET_MAX_SEGMENTS_PER_REQUEST=0 - -# Celery schedule tasks configuration -ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false -ENABLE_CLEAN_UNUSED_DATASETS_TASK=false -ENABLE_CREATE_TIDB_SERVERLESS_TASK=false -ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false -ENABLE_CLEAN_MESSAGES=false -ENABLE_WORKFLOW_RUN_CLEANUP_TASK=false -ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false -ENABLE_DATASETS_QUEUE_MONITOR=false -ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true -ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK=true -WORKFLOW_SCHEDULE_POLLER_INTERVAL=1 -WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE=100 -WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0 - -# Tenant isolated task queue configuration -TENANT_ISOLATED_TASK_CONCURRENCY=1 - -# Maximum allowed CSV file size for annotation import in megabytes -ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2 -#Maximum number of annotation records allowed in a single import -ANNOTATION_IMPORT_MAX_RECORDS=10000 -# Minimum number of annotation records required in a single import -ANNOTATION_IMPORT_MIN_RECORDS=1 -ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5 -ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20 -# Maximum number of concurrent annotation import tasks per tenant -ANNOTATION_IMPORT_MAX_CONCURRENT=5 - -# The API key of amplitude -AMPLITUDE_API_KEY= - -# Sandbox expired records clean configuration -SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21 -SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000 -SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL=200 -SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30 - - -# 
Redis URL used for event bus between API and -# celery worker -# defaults to url constructed from `REDIS_*` -# configurations -EVENT_BUS_REDIS_URL= -# Event transport type. Options are: -# -# - pubsub: normal Pub/Sub (at-most-once) -# - sharded: sharded Pub/Sub (at-most-once) -# - streams: Redis Streams (at-least-once, recommended to avoid subscriber races) -# -# Note: Before enabling 'streams' in production, estimate your expected event volume and retention needs. -# Configure Redis memory limits and stream trimming appropriately (e.g., MAXLEN and key expiry) to reduce -# the risk of data loss from Redis auto-eviction under memory pressure. -# Also accepts ENV: EVENT_BUS_REDIS_CHANNEL_TYPE. -EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub -# Whether to use Redis cluster mode while use redis as event bus. -# It's highly recommended to enable this for large deployments. -EVENT_BUS_REDIS_USE_CLUSTERS=false - -# Whether to Enable human input timeout check task -ENABLE_HUMAN_INPUT_TIMEOUT_TASK=true -# Human input timeout check interval in minutes -HUMAN_INPUT_TIMEOUT_TASK_INTERVAL=1 - - -SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL=90000 +# Nginx and Docker Compose +NGINX_SERVER_NAME=_ +NGINX_HTTPS_ENABLED=false +NGINX_PORT=80 +NGINX_SSL_PORT=443 +NGINX_SSL_CERT_FILENAME=dify.crt +NGINX_SSL_CERT_KEY_FILENAME=dify.key +NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3 +NGINX_WORKER_PROCESSES=auto +NGINX_CLIENT_MAX_BODY_SIZE=100M +NGINX_KEEPALIVE_TIMEOUT=65 +NGINX_PROXY_READ_TIMEOUT=3600s +NGINX_PROXY_SEND_TIMEOUT=3600s +NGINX_ENABLE_CERTBOT_CHALLENGE=false +EXPOSE_NGINX_PORT=80 +EXPOSE_NGINX_SSL_PORT=443 +COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql} diff --git a/docker/.gitignore b/docker/.gitignore new file mode 100644 index 0000000000..c3a47ad592 --- /dev/null +++ b/docker/.gitignore @@ -0,0 +1,3 @@ +# Ignore actual .env files (keep only .env.example files in git) +*.env +!*.env.example diff --git a/docker/README.md b/docker/README.md index 3a7f4c2ad5..a2d9b2eeba 100644 --- 
a/docker/README.md +++ b/docker/README.md @@ -7,29 +7,31 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T - **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.\ For more information, refer `docker/certbot/README.md`. -- **Persistent Environment Variables**: Default environment variables are managed through `.env.default`, while local overrides are stored in `.env`, ensuring that your configurations persist across deployments. +- **Persistent Environment Variables**: Essential startup defaults are provided in `.env.example`, while local values are stored in `.env`, ensuring that your configurations persist across deployments. > What is `.env`?

- > The `.env` file is a local override file. Keep it small by adding only the values that differ from `.env.default`. Use `.env.example` as the full reference when you need advanced configuration. + > The `.env` file is the local startup file. Copy it from `.env.example` for a default deployment. Optional advanced settings live in `envs/*.env.example` files. - **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file. -- **Local .env Overrides**: The `dify-compose` and `dify-compose.ps1` wrappers create `.env` if it is missing and generate a persistent `SECRET_KEY` for this deployment. - ### How to Deploy Dify with `docker-compose.yaml` 1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system. 1. **Environment Setup**: - Navigate to the `docker` directory. - - No copy step is required. The `dify-compose` wrappers create `.env` if it is missing and write a generated `SECRET_KEY` to it. - - When prompted on first run, press Enter to use the default deployment, or answer `y` to stop and edit `.env` first. - - Customize `.env` only when you need to override defaults from `.env.default`. Refer to `.env.example` for the full list of available variables. + - Copy `.env.example` to `.env`. + - Customize `.env` when you need to change essential startup defaults. Copy optional files from `envs/` without the `.example` suffix when you need advanced settings. - **Optional (for advanced deployments)**: If you maintain a full `.env` file copied from `.env.example`, you may use the environment synchronization tool to keep it aligned with the latest `.env.example` updates while preserving your custom settings. See the [Environment Variables Synchronization](#environment-variables-synchronization) section below. 1. 
**Running the Services**: - - Execute `./dify-compose up -d` from the `docker` directory to start the services. On Windows PowerShell, run `.\dify-compose.ps1 up -d`. + - Execute `docker compose up -d` from the `docker` directory to start the services. - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`. + ```bash + cp .env.example .env + docker compose up -d + ``` + 1. **SSL Certificate Setup**: - Refer `docker/certbot/README.md` to set up SSL certificates using Certbot. 1. **OpenTelemetry Collector Setup**: @@ -41,7 +43,7 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T 1. **Middleware Setup**: - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches. - Navigate to the `docker` directory. - - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file). + - Ensure the `middleware.env` file is created by running `cp envs/middleware.env.example middleware.env` (refer to the `envs/middleware.env.example` file). 1. **Running Middleware Services**: - Navigate to the `docker` directory. - Execute `docker compose --env-file middleware.env -f docker-compose.middleware.yaml -p dify up -d` to start PostgreSQL/MySQL (per `DB_TYPE`) plus the bundled Weaviate instance. @@ -58,13 +60,13 @@ For users migrating from the `docker-legacy` setup: 1. **Data Migration**: - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary. -### Overview of `.env.default`, `.env`, and `.env.example` +### Overview of `.env`, `.env.example`, and `envs/` -- `.env.default` contains the minimal default configuration for Docker Compose deployments. -- `.env` contains the generated `SECRET_KEY` plus any local overrides. 
-- `.env.example` is the full reference for advanced configuration. +- `.env.example` contains the essential default configuration for Docker Compose deployments. +- `.env` contains local startup values copied from `.env.example` and any local changes. +- `envs/*.env.example` files contain optional advanced configuration grouped by theme. -The `dify-compose` wrappers merge `.env.default` and `.env` into a temporary environment file, append paired internal service keys when needed, and remove the temporary file after Docker Compose starts. +Docker Compose reads `envs/*.env` files when present, then reads `.env` last so values in `.env` take precedence. #### Key Modules and Customization @@ -74,7 +76,7 @@ The `dify-compose` wrappers merge `.env.default` and `.env` into a temporary env #### Other notable variables -The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables: +The root `.env.example` file contains the essential startup settings. Optional and provider-specific settings are grouped in `envs/*.env.example` files. Here are some of the key sections and variables: 1. **Common Variables**: @@ -102,7 +104,7 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w 1. **Storage Configuration**: - - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc. + - `STORAGE_TYPE`, `OPENDAL_SCHEME`, `OPENDAL_FS_ROOT`: Default local file storage settings. Optional storage backends are configured from the files under `envs/`. 1. 
**Vector Database Configuration**: @@ -124,11 +126,11 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w ### Environment Variables Synchronization -When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.default` or `.env.example`. +When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.example` or the optional files under `envs/`. -If you use the default override-only workflow, review `.env.default` and add only the values you need to override to `.env`. +If you use the default workflow, review `.env.example` and keep your `.env` aligned with essential startup values. -If you maintain a full `.env` file copied from `.env.example`, an optional environment variables synchronization tool is provided. +If you maintain a customized `.env` file copied from `.env.example`, an optional environment variables synchronization tool is provided. > This tool performs a **one-way synchronization** from `.env.example` to `.env`. > Existing values in `.env` are never overwritten automatically. diff --git a/docker/dify-compose b/docker/dify-compose deleted file mode 100755 index 16bbd6b538..0000000000 --- a/docker/dify-compose +++ /dev/null @@ -1,334 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -cd "$SCRIPT_DIR" - -DEFAULT_ENV_FILE=".env.default" -USER_ENV_FILE=".env" - -log() { - printf '%s\n' "$*" >&2 -} - -die() { - printf 'Error: %s\n' "$*" >&2 - exit 1 -} - -detect_compose() { - if docker compose version >/dev/null 2>&1; then - COMPOSE_CMD=(docker compose) - return - fi - - if command -v docker-compose >/dev/null 2>&1; then - COMPOSE_CMD=(docker-compose) - return - fi - - die "Docker Compose is not available. Install Docker Compose, then run this command again." 
-} - -generate_secret_key() { - if command -v openssl >/dev/null 2>&1; then - openssl rand -base64 42 - return - fi - - if command -v dd >/dev/null 2>&1 && command -v base64 >/dev/null 2>&1; then - dd if=/dev/urandom bs=42 count=1 2>/dev/null | base64 | tr -d '\n' - printf '\n' - return - fi - - return 1 -} - -ensure_env_files() { - [[ -f "$DEFAULT_ENV_FILE" ]] || die "$DEFAULT_ENV_FILE is missing." - - if [[ -f "$USER_ENV_FILE" ]]; then - return - fi - - : >"$USER_ENV_FILE" - - if [[ ! -t 0 ]]; then - log "Created $USER_ENV_FILE for local overrides." - return - fi - - printf 'Created %s for local overrides.\n' "$USER_ENV_FILE" - printf 'Do you need a custom deployment now? (Most users can press Enter to skip.) [y/N] ' - read -r answer - - case "${answer:-}" in - y | Y | yes | YES | Yes) - cat <<'EOF' -Edit .env with the settings you want to override, using .env.example as the full reference. -Run ./dify-compose up -d again when you are ready. -EOF - exit 0 - ;; - esac -} - -user_env_value() { - local key="$1" - awk -F= -v target="$key" ' - /^[[:space:]]*#/ || !/=/{ next } - { - key = $1 - gsub(/^[[:space:]]+|[[:space:]]+$/, "", key) - if (key == target) { - value = substr($0, index($0, "=") + 1) - gsub(/^[[:space:]]+|[[:space:]]+$/, "", value) - if ((value ~ /^".*"$/) || (value ~ /^'\''.*'\''$/)) { - value = substr(value, 2, length(value) - 2) - } - result = value - } - } - END { print result } - ' "$USER_ENV_FILE" -} - -set_user_env_value() { - local key="$1" - local value="$2" - local temp_file - - temp_file="$(mktemp "${TMPDIR:-/tmp}/dify-env.XXXXXX")" - awk -F= -v target="$key" -v replacement="$key=$value" ' - BEGIN { replaced = 0 } - /^[[:space:]]*#/ || !/=/{ print; next } - { - key = $1 - gsub(/^[[:space:]]+|[[:space:]]+$/, "", key) - if (key == target) { - if (!replaced) { - print replacement - replaced = 1 - } - next - } - print - } - END { - if (!replaced) { - print replacement - } - } - ' "$USER_ENV_FILE" >"$temp_file" - mv "$temp_file" "$USER_ENV_FILE" 
-} - -ensure_secret_key() { - local current_secret_key - local secret_key - - current_secret_key="$(user_env_value SECRET_KEY)" - if [[ -n "$current_secret_key" ]]; then - return - fi - - secret_key="$(generate_secret_key)" || die "Unable to generate SECRET_KEY. Install openssl or configure SECRET_KEY in .env." - set_user_env_value SECRET_KEY "$secret_key" - log "Generated SECRET_KEY in $USER_ENV_FILE." -} - -env_value() { - local key="$1" - awk -F= -v target="$key" ' - /^[[:space:]]*#/ || !/=/{ next } - { - key = $1 - gsub(/^[[:space:]]+|[[:space:]]+$/, "", key) - if (key == target) { - value = substr($0, index($0, "=") + 1) - gsub(/^[[:space:]]+|[[:space:]]+$/, "", value) - if ((value ~ /^".*"$/) || (value ~ /^'\''.*'\''$/)) { - value = substr(value, 2, length(value) - 2) - } - result = value - } - } - END { print result } - ' "$DEFAULT_ENV_FILE" "$USER_ENV_FILE" -} - -user_overrides() { - local key="$1" - grep -Eq "^[[:space:]]*${key}[[:space:]]*=" "$USER_ENV_FILE" -} - -write_merged_env() { - awk ' - function trim(s) { - sub(/^[[:space:]]+/, "", s) - sub(/[[:space:]]+$/, "", s) - return s - } - - /^[[:space:]]*#/ || !/=/{ next } - - { - key = $0 - sub(/=.*/, "", key) - key = trim(key) - if (key == "") { - next - } - - value = substr($0, index($0, "=") + 1) - value = trim(value) - - if (!(key in seen)) { - order[++count] = key - seen[key] = 1 - } - - values[key] = value - } - - END { - for (i = 1; i <= count; i++) { - key = order[i] - print key "=" values[key] - } - } - ' "$DEFAULT_ENV_FILE" "$USER_ENV_FILE" >"$MERGED_ENV_FILE" -} - -set_merged_env_value() { - local key="$1" - local value="$2" - local temp_file - - temp_file="$(mktemp "${TMPDIR:-/tmp}/dify-compose-env.XXXXXX")" - awk -F= -v target="$key" -v replacement="$key=$value" ' - BEGIN { replaced = 0 } - /^[[:space:]]*#/ || !/=/{ print; next } - { - key = $1 - gsub(/^[[:space:]]+|[[:space:]]+$/, "", key) - if (key == target) { - if (!replaced) { - print replacement - replaced = 1 - } - next - } - print - 
} - END { - if (!replaced) { - print replacement - } - } - ' "$MERGED_ENV_FILE" >"$temp_file" - mv "$temp_file" "$MERGED_ENV_FILE" -} - -set_if_not_overridden() { - local key="$1" - local value="$2" - - if user_overrides "$key"; then - return - fi - - set_merged_env_value "$key" "$value" -} - -metadata_db_host() { - case "$1" in - mysql) printf 'db_mysql' ;; - postgresql | '') printf 'db_postgres' ;; - *) printf '%s' "$(env_value DB_HOST)" ;; - esac -} - -metadata_db_port() { - case "$1" in - mysql) printf '3306' ;; - postgresql | '') printf '5432' ;; - *) printf '%s' "$(env_value DB_PORT)" ;; - esac -} - -metadata_db_user() { - case "$1" in - mysql) printf 'root' ;; - postgresql | '') printf 'postgres' ;; - *) printf '%s' "$(env_value DB_USERNAME)" ;; - esac -} - -build_merged_env() { - MERGED_ENV_FILE="$(mktemp "${TMPDIR:-/tmp}/dify-compose.XXXXXX")" - trap 'rm -f "$MERGED_ENV_FILE"' EXIT - - write_merged_env - - local db_type - local redis_host - local redis_port - local redis_username - local redis_password - local redis_auth - local code_execution_api_key - local weaviate_api_key - - db_type="$(env_value DB_TYPE)" - - set_if_not_overridden DB_HOST "$(metadata_db_host "$db_type")" - set_if_not_overridden DB_PORT "$(metadata_db_port "$db_type")" - set_if_not_overridden DB_USERNAME "$(metadata_db_user "$db_type")" - - if ! user_overrides CELERY_BROKER_URL; then - redis_host="$(env_value REDIS_HOST)" - redis_port="$(env_value REDIS_PORT)" - redis_username="$(env_value REDIS_USERNAME)" - redis_password="$(env_value REDIS_PASSWORD)" - redis_auth="" - - if [[ -n "$redis_username" && -n "$redis_password" ]]; then - redis_auth="${redis_username}:${redis_password}@" - elif [[ -n "$redis_password" ]]; then - redis_auth=":${redis_password}@" - elif [[ -n "$redis_username" ]]; then - redis_auth="${redis_username}@" - fi - - set_merged_env_value CELERY_BROKER_URL "redis://${redis_auth}${redis_host:-redis}:${redis_port:-6379}/1" - fi - - if ! 
user_overrides SANDBOX_API_KEY; then - code_execution_api_key="$(env_value CODE_EXECUTION_API_KEY)" - set_if_not_overridden SANDBOX_API_KEY "${code_execution_api_key:-dify-sandbox}" - fi - - if ! user_overrides WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS; then - weaviate_api_key="$(env_value WEAVIATE_API_KEY)" - set_if_not_overridden WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS \ - "${weaviate_api_key:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}" - fi -} - -main() { - detect_compose - ensure_env_files - ensure_secret_key - build_merged_env - - if [[ "$#" -eq 0 ]]; then - set -- up -d - fi - - "${COMPOSE_CMD[@]}" --env-file "$MERGED_ENV_FILE" "$@" -} - -main "$@" diff --git a/docker/dify-compose.ps1 b/docker/dify-compose.ps1 deleted file mode 100644 index 851f8b76fe..0000000000 --- a/docker/dify-compose.ps1 +++ /dev/null @@ -1,317 +0,0 @@ -$ErrorActionPreference = "Stop" -Set-StrictMode -Version Latest - -$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path -Set-Location $ScriptDir - -$DefaultEnvFile = ".env.default" -$UserEnvFile = ".env" -$MergedEnvFile = $null -$Utf8NoBom = New-Object System.Text.UTF8Encoding -ArgumentList $false - -function Write-Info { - param([string]$Message) - [Console]::Error.WriteLine($Message) -} - -function Fail { - param([string]$Message) - [Console]::Error.WriteLine("Error: $Message") - exit 1 -} - -function Test-CommandSuccess { - param([string[]]$Command) - - try { - $Executable = $Command[0] - $CommandArgs = @() - if ($Command.Length -gt 1) { - $CommandArgs = @($Command[1..($Command.Length - 1)]) - } - - & $Executable @CommandArgs *> $null - return $LASTEXITCODE -eq 0 - } - catch { - return $false - } -} - -function Get-ComposeCommand { - if (Test-CommandSuccess @("docker", "compose", "version")) { - return @("docker", "compose") - } - - if ((Get-Command "docker-compose" -ErrorAction SilentlyContinue) -and (Test-CommandSuccess @("docker-compose", "version"))) { - return @("docker-compose") - } - - Fail "Docker Compose is not 
available. Install Docker Compose, then run this command again." -} - -function New-SecretKey { - $Bytes = New-Object byte[] 42 - $Generator = [System.Security.Cryptography.RandomNumberGenerator]::Create() - - try { - $Generator.GetBytes($Bytes) - } - finally { - $Generator.Dispose() - } - - return [Convert]::ToBase64String($Bytes) -} - -function Ensure-EnvFiles { - if (-not (Test-Path $DefaultEnvFile -PathType Leaf)) { - Fail "$DefaultEnvFile is missing." - } - - if (Test-Path $UserEnvFile -PathType Leaf) { - return - } - - New-Item -ItemType File -Path $UserEnvFile | Out-Null - - if ([Console]::IsInputRedirected) { - Write-Info "Created $UserEnvFile for local overrides." - return - } - - Write-Info "Created $UserEnvFile for local overrides." - $Answer = Read-Host "Do you need a custom deployment now? (Most users can press Enter to skip.) [y/N]" - - if ($Answer -match "^(y|yes)$") { - Write-Output "Edit .env with the settings you want to override, using .env.example as the full reference." - Write-Output "Run .\dify-compose.ps1 up -d again when you are ready." 
- exit 0 - } -} - -function Read-EnvFile { - param([string]$Path) - - $Values = [ordered]@{} - - if (-not (Test-Path $Path -PathType Leaf)) { - return $Values - } - - foreach ($Line in Get-Content -Path $Path) { - if ($Line -match "^\s*#" -or $Line -notmatch "=") { - continue - } - - $SeparatorIndex = $Line.IndexOf("=") - $Key = $Line.Substring(0, $SeparatorIndex).Trim() - $Value = $Line.Substring($SeparatorIndex + 1).Trim() - - if (($Value.StartsWith('"') -and $Value.EndsWith('"')) -or ($Value.StartsWith("'") -and $Value.EndsWith("'"))) { - $Value = $Value.Substring(1, $Value.Length - 2) - } - - if ($Key.Length -gt 0) { - $Values[$Key] = $Value - } - } - - return $Values -} - -function Set-UserEnvValue { - param( - [string]$Key, - [string]$Value - ) - - $Path = [string](Resolve-Path $UserEnvFile) - $Lines = [System.IO.File]::ReadAllLines($Path, [System.Text.Encoding]::UTF8) - $Output = New-Object System.Collections.Generic.List[string] - $Replaced = $false - - foreach ($Line in $Lines) { - if ($Line -match "^\s*#" -or $Line -notmatch "=") { - $Output.Add($Line) - continue - } - - $SeparatorIndex = $Line.IndexOf("=") - $CurrentKey = $Line.Substring(0, $SeparatorIndex).Trim() - - if ($CurrentKey -eq $Key) { - if (-not $Replaced) { - $Output.Add("$Key=$Value") - $Replaced = $true - } - continue - } - - $Output.Add($Line) - } - - if (-not $Replaced) { - $Output.Add("$Key=$Value") - } - - [System.IO.File]::WriteAllLines($Path, $Output, $Utf8NoBom) -} - -function Ensure-SecretKey { - $Values = Read-EnvFile $UserEnvFile - - if ($Values.Contains("SECRET_KEY") -and $Values["SECRET_KEY"]) { - return - } - - Set-UserEnvValue "SECRET_KEY" (New-SecretKey) - Write-Info "Generated SECRET_KEY in $UserEnvFile." 
-} - -function Merge-EnvValues { - $Values = [ordered]@{} - - foreach ($Entry in (Read-EnvFile $DefaultEnvFile).GetEnumerator()) { - $Values[$Entry.Key] = $Entry.Value - } - - foreach ($Entry in (Read-EnvFile $UserEnvFile).GetEnumerator()) { - $Values[$Entry.Key] = $Entry.Value - } - - return $Values -} - -function User-Overrides { - param([string]$Key) - - if (-not (Test-Path $UserEnvFile -PathType Leaf)) { - return $false - } - - return [bool](Select-String -Path $UserEnvFile -Pattern "^\s*$([regex]::Escape($Key))\s*=" -Quiet) -} - -function Metadata-DbHost { - param([string]$DbType, $Values) - - switch ($DbType) { - "mysql" { return "db_mysql" } - "postgresql" { return "db_postgres" } - "" { return "db_postgres" } - default { return $Values["DB_HOST"] } - } -} - -function Metadata-DbPort { - param([string]$DbType, $Values) - - switch ($DbType) { - "mysql" { return "3306" } - "postgresql" { return "5432" } - "" { return "5432" } - default { return $Values["DB_PORT"] } - } -} - -function Metadata-DbUser { - param([string]$DbType, $Values) - - switch ($DbType) { - "mysql" { return "root" } - "postgresql" { return "postgres" } - "" { return "postgres" } - default { return $Values["DB_USERNAME"] } - } -} - -function Write-MergedEnv { - param($Values) - - $Output = New-Object System.Collections.Generic.List[string] - - foreach ($Entry in $Values.GetEnumerator()) { - $Output.Add("$($Entry.Key)=$($Entry.Value)") - } - - [System.IO.File]::WriteAllLines($MergedEnvFile, $Output, $Utf8NoBom) -} - -function Build-MergedEnv { - $Values = Merge-EnvValues - $script:MergedEnvFile = [System.IO.Path]::GetTempFileName() - - $DbType = if ($Values.Contains("DB_TYPE")) { $Values["DB_TYPE"] } else { "postgresql" } - - if (-not (User-Overrides "DB_HOST")) { - $Values["DB_HOST"] = Metadata-DbHost $DbType $Values - } - - if (-not (User-Overrides "DB_PORT")) { - $Values["DB_PORT"] = Metadata-DbPort $DbType $Values - } - - if (-not (User-Overrides "DB_USERNAME")) { - $Values["DB_USERNAME"] 
= Metadata-DbUser $DbType $Values - } - - if (-not (User-Overrides "CELERY_BROKER_URL")) { - $RedisHost = if ($Values.Contains("REDIS_HOST") -and $Values["REDIS_HOST"]) { $Values["REDIS_HOST"] } else { "redis" } - $RedisPort = if ($Values.Contains("REDIS_PORT") -and $Values["REDIS_PORT"]) { $Values["REDIS_PORT"] } else { "6379" } - $RedisUsername = if ($Values.Contains("REDIS_USERNAME")) { $Values["REDIS_USERNAME"] } else { "" } - $RedisPassword = if ($Values.Contains("REDIS_PASSWORD")) { $Values["REDIS_PASSWORD"] } else { "" } - $RedisAuth = "" - - if ($RedisUsername -and $RedisPassword) { - $RedisAuth = "${RedisUsername}:${RedisPassword}@" - } - elseif ($RedisPassword) { - $RedisAuth = ":${RedisPassword}@" - } - elseif ($RedisUsername) { - $RedisAuth = "${RedisUsername}@" - } - - $Values["CELERY_BROKER_URL"] = "redis://$RedisAuth${RedisHost}:${RedisPort}/1" - } - - if (-not (User-Overrides "SANDBOX_API_KEY")) { - $CodeExecutionApiKey = if ($Values.Contains("CODE_EXECUTION_API_KEY") -and $Values["CODE_EXECUTION_API_KEY"]) { $Values["CODE_EXECUTION_API_KEY"] } else { "dify-sandbox" } - $Values["SANDBOX_API_KEY"] = $CodeExecutionApiKey - } - - if (-not (User-Overrides "WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS")) { - $WeaviateApiKey = if ($Values.Contains("WEAVIATE_API_KEY") -and $Values["WEAVIATE_API_KEY"]) { $Values["WEAVIATE_API_KEY"] } else { "WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih" } - $Values["WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS"] = $WeaviateApiKey - } - - Write-MergedEnv $Values -} - -$ComposeCommand = Get-ComposeCommand - -try { - Ensure-EnvFiles - Ensure-SecretKey - Build-MergedEnv - - $ComposeArgs = @($args) - if ($ComposeArgs.Count -eq 0) { - $ComposeArgs = @("up", "-d") - } - - $ComposeCommandArgs = @() - if ($ComposeCommand.Length -gt 1) { - $ComposeCommandArgs = @($ComposeCommand[1..($ComposeCommand.Length - 1)]) - } - - $ComposeExecutable = $ComposeCommand[0] - & $ComposeExecutable @ComposeCommandArgs --env-file $MergedEnvFile @ComposeArgs - 
exit $LASTEXITCODE -} -finally { - if ($MergedEnvFile -and (Test-Path $MergedEnvFile -PathType Leaf)) { - Remove-Item -Force $MergedEnvFile - } -} diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index b2df61ebb2..0f65c38098 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -1,4 +1,202 @@ -x-shared-env: &shared-api-worker-env +# Shared configuration using YAML anchors and env_file +x-shared-api-worker-config: &shared-api-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/api.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: 
./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-config: &shared-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + 
required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-beat-config: &shared-worker-beat-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker-beat.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: 
./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + services: # Init container to fix permissions init_permissions: @@ -21,12 +219,9 @@ services: # API service api: + <<: *shared-api-worker-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'api' starts the API server. MODE: api SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -69,12 +264,9 @@ services: # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: + <<: *shared-worker-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker' starts the Celery worker for processing all queues. MODE: worker SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -115,12 +307,9 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: + <<: *shared-worker-beat-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks. 
MODE: beat depends_on: init_permissions: @@ -154,6 +343,12 @@ services: web: image: langgenius/dify-web:1.14.0 restart: always + env_file: + - path: ./envs/core-services/web.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} APP_API_URL: ${APP_API_URL:-} @@ -228,7 +423,7 @@ services: MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456} MYSQL_DATABASE: ${DB_DATABASE:-dify} command: > - --max_connections=1000 + --max_connections=${MYSQL_MAX_CONNECTIONS:-1000} --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M} --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} @@ -270,6 +465,12 @@ services: sandbox: image: langgenius/dify-sandbox:0.2.15 restart: always + env_file: + - path: ./envs/core-services/sandbox.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: # The DifySandbox configurations # Make sure you are changing this key for your deployment with a strong key. @@ -294,9 +495,24 @@ services: plugin_daemon: image: langgenius/dify-plugin-daemon:0.6.0-local restart: always + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/plugin-daemon.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default environment: - # Use the shared environment variables. 
- <<: *shared-api-worker-env DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} DB_SSL_MODE: ${DB_SSL_MODE:-disable} SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml index 23c26c6695..0ad406a63b 100644 --- a/docker/docker-compose.middleware.yaml +++ b/docker/docker-compose.middleware.yaml @@ -51,7 +51,7 @@ services: MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456} MYSQL_DATABASE: ${DB_DATABASE:-dify} command: > - --max_connections=1000 + --max_connections=${MYSQL_MAX_CONNECTIONS:-1000} --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M} --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 6dcab4a9fc..0f8458a58f 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -4,724 +4,204 @@ # or docker-compose-template.yaml and regenerate this file. 
# ================================================================== -x-shared-env: &shared-api-worker-env - CONSOLE_API_URL: ${CONSOLE_API_URL:-} - CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-} - SERVICE_API_URL: ${SERVICE_API_URL:-} - TRIGGER_URL: ${TRIGGER_URL:-http://localhost} - APP_API_URL: ${APP_API_URL:-} - APP_WEB_URL: ${APP_WEB_URL:-} - FILES_URL: ${FILES_URL:-} - INTERNAL_FILES_URL: ${INTERNAL_FILES_URL:-} - LANG: ${LANG:-C.UTF-8} - LC_ALL: ${LC_ALL:-C.UTF-8} - PYTHONIOENCODING: ${PYTHONIOENCODING:-utf-8} - UV_CACHE_DIR: ${UV_CACHE_DIR:-/tmp/.uv-cache} - LOG_LEVEL: ${LOG_LEVEL:-INFO} - LOG_OUTPUT_FORMAT: ${LOG_OUTPUT_FORMAT:-text} - LOG_FILE: ${LOG_FILE:-/app/logs/server.log} - LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} - LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5} - LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S} - LOG_TZ: ${LOG_TZ:-UTC} - DEBUG: ${DEBUG:-false} - FLASK_DEBUG: ${FLASK_DEBUG:-false} - ENABLE_REQUEST_LOGGING: ${ENABLE_REQUEST_LOGGING:-False} - SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U} - INIT_PASSWORD: ${INIT_PASSWORD:-} - DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION} - CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai} - OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1} - MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true} - FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300} - ENABLE_COLLABORATION_MODE: ${ENABLE_COLLABORATION_MODE:-false} - ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60} - REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30} - APP_DEFAULT_ACTIVE_REQUESTS: ${APP_DEFAULT_ACTIVE_REQUESTS:-0} - APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0} - APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200} - DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0} - DIFY_PORT: ${DIFY_PORT:-5001} - SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1} - SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent} - SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10} - 
CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-} - GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360} - CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-4} - CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false} - CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-} - CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-} - API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10} - API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60} - ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true} - ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true} - ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true} - NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false} - DB_TYPE: ${DB_TYPE:-postgresql} - DB_USERNAME: ${DB_USERNAME:-postgres} - DB_PASSWORD: ${DB_PASSWORD:-difyai123456} - DB_HOST: ${DB_HOST:-db_postgres} - DB_PORT: ${DB_PORT:-5432} - DB_DATABASE: ${DB_DATABASE:-dify} - SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30} - SQLALCHEMY_MAX_OVERFLOW: ${SQLALCHEMY_MAX_OVERFLOW:-10} - SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600} - SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false} - SQLALCHEMY_POOL_PRE_PING: ${SQLALCHEMY_POOL_PRE_PING:-false} - SQLALCHEMY_POOL_USE_LIFO: ${SQLALCHEMY_POOL_USE_LIFO:-false} - SQLALCHEMY_POOL_TIMEOUT: ${SQLALCHEMY_POOL_TIMEOUT:-30} - POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-200} - POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB} - POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB} - POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB} - POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB} - POSTGRES_STATEMENT_TIMEOUT: ${POSTGRES_STATEMENT_TIMEOUT:-0} - POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: ${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0} - MYSQL_MAX_CONNECTIONS: ${MYSQL_MAX_CONNECTIONS:-1000} - MYSQL_INNODB_BUFFER_POOL_SIZE: ${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} - MYSQL_INNODB_LOG_FILE_SIZE: ${MYSQL_INNODB_LOG_FILE_SIZE:-128M} - 
MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT: ${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} - REDIS_HOST: ${REDIS_HOST:-redis} - REDIS_PORT: ${REDIS_PORT:-6379} - REDIS_USERNAME: ${REDIS_USERNAME:-} - REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456} - REDIS_USE_SSL: ${REDIS_USE_SSL:-false} - REDIS_SSL_CERT_REQS: ${REDIS_SSL_CERT_REQS:-CERT_NONE} - REDIS_SSL_CA_CERTS: ${REDIS_SSL_CA_CERTS:-} - REDIS_SSL_CERTFILE: ${REDIS_SSL_CERTFILE:-} - REDIS_SSL_KEYFILE: ${REDIS_SSL_KEYFILE:-} - REDIS_DB: ${REDIS_DB:-0} - REDIS_KEY_PREFIX: ${REDIS_KEY_PREFIX:-} - REDIS_MAX_CONNECTIONS: ${REDIS_MAX_CONNECTIONS:-} - REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false} - REDIS_SENTINELS: ${REDIS_SENTINELS:-} - REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-} - REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-} - REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-} - REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1} - REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false} - REDIS_CLUSTERS: ${REDIS_CLUSTERS:-} - REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-} - REDIS_RETRY_RETRIES: ${REDIS_RETRY_RETRIES:-3} - REDIS_RETRY_BACKOFF_BASE: ${REDIS_RETRY_BACKOFF_BASE:-1.0} - REDIS_RETRY_BACKOFF_CAP: ${REDIS_RETRY_BACKOFF_CAP:-10.0} - REDIS_SOCKET_TIMEOUT: ${REDIS_SOCKET_TIMEOUT:-5.0} - REDIS_SOCKET_CONNECT_TIMEOUT: ${REDIS_SOCKET_CONNECT_TIMEOUT:-5.0} - REDIS_HEALTH_CHECK_INTERVAL: ${REDIS_HEALTH_CHECK_INTERVAL:-30} - CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1} - CELERY_BACKEND: ${CELERY_BACKEND:-redis} - BROKER_USE_SSL: ${BROKER_USE_SSL:-false} - CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false} - CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-} - CELERY_SENTINEL_PASSWORD: ${CELERY_SENTINEL_PASSWORD:-} - CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1} - CELERY_TASK_ANNOTATIONS: ${CELERY_TASK_ANNOTATIONS:-null} - WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*} - CONSOLE_CORS_ALLOW_ORIGINS: 
${CONSOLE_CORS_ALLOW_ORIGINS:-*} - COOKIE_DOMAIN: ${COOKIE_DOMAIN:-} - NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-} - NEXT_PUBLIC_SOCKET_URL: ${NEXT_PUBLIC_SOCKET_URL:-ws://localhost} - NEXT_PUBLIC_BATCH_CONCURRENCY: ${NEXT_PUBLIC_BATCH_CONCURRENCY:-5} - STORAGE_TYPE: ${STORAGE_TYPE:-opendal} - OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} - OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} - CLICKZETTA_VOLUME_TYPE: ${CLICKZETTA_VOLUME_TYPE:-user} - CLICKZETTA_VOLUME_NAME: ${CLICKZETTA_VOLUME_NAME:-} - CLICKZETTA_VOLUME_TABLE_PREFIX: ${CLICKZETTA_VOLUME_TABLE_PREFIX:-dataset_} - CLICKZETTA_VOLUME_DIFY_PREFIX: ${CLICKZETTA_VOLUME_DIFY_PREFIX:-dify_km} - S3_ENDPOINT: ${S3_ENDPOINT:-} - S3_REGION: ${S3_REGION:-us-east-1} - S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai} - S3_ACCESS_KEY: ${S3_ACCESS_KEY:-} - S3_SECRET_KEY: ${S3_SECRET_KEY:-} - S3_ADDRESS_STYLE: ${S3_ADDRESS_STYLE:-auto} - S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false} - ARCHIVE_STORAGE_ENABLED: ${ARCHIVE_STORAGE_ENABLED:-false} - ARCHIVE_STORAGE_ENDPOINT: ${ARCHIVE_STORAGE_ENDPOINT:-} - ARCHIVE_STORAGE_ARCHIVE_BUCKET: ${ARCHIVE_STORAGE_ARCHIVE_BUCKET:-} - ARCHIVE_STORAGE_EXPORT_BUCKET: ${ARCHIVE_STORAGE_EXPORT_BUCKET:-} - ARCHIVE_STORAGE_ACCESS_KEY: ${ARCHIVE_STORAGE_ACCESS_KEY:-} - ARCHIVE_STORAGE_SECRET_KEY: ${ARCHIVE_STORAGE_SECRET_KEY:-} - ARCHIVE_STORAGE_REGION: ${ARCHIVE_STORAGE_REGION:-auto} - AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai} - AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai} - AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container} - AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://.blob.core.windows.net} - GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name} - GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-} - ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name} - ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key} - 
ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key} - ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com} - ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1} - ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4} - ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path} - TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name} - TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key} - TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id} - TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region} - TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme} - TENCENT_COS_CUSTOM_DOMAIN: ${TENCENT_COS_CUSTOM_DOMAIN:-your-custom-domain} - OCI_ENDPOINT: ${OCI_ENDPOINT:-https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com} - OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name} - OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key} - OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key} - OCI_REGION: ${OCI_REGION:-us-ashburn-1} - HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name} - HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key} - HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key} - HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url} - HUAWEI_OBS_PATH_STYLE: ${HUAWEI_OBS_PATH_STYLE:-false} - VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name} - VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key} - VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key} - VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url} - VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region} - BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name} - BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key} - BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key} - BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url} - 
SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name} - SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key} - SUPABASE_URL: ${SUPABASE_URL:-your-server-url} - VECTOR_STORE: ${VECTOR_STORE:-weaviate} - VECTOR_INDEX_NAME_PREFIX: ${VECTOR_INDEX_NAME_PREFIX:-Vector_index} - WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080} - WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} - WEAVIATE_GRPC_ENDPOINT: ${WEAVIATE_GRPC_ENDPOINT:-grpc://weaviate:50051} - WEAVIATE_TOKENIZATION: ${WEAVIATE_TOKENIZATION:-word} - OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase} - OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} - OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} - OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} - OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} - OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} - OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} - OCEANBASE_ENABLE_HYBRID_SEARCH: ${OCEANBASE_ENABLE_HYBRID_SEARCH:-false} - OCEANBASE_FULLTEXT_PARSER: ${OCEANBASE_FULLTEXT_PARSER:-ik} - SEEKDB_MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G} - QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333} - QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} - QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20} - QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false} - QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334} - QDRANT_REPLICATION_FACTOR: ${QDRANT_REPLICATION_FACTOR:-1} - MILVUS_URI: ${MILVUS_URI:-http://host.docker.internal:19530} - MILVUS_DATABASE: ${MILVUS_DATABASE:-} - MILVUS_TOKEN: ${MILVUS_TOKEN:-} - MILVUS_USER: ${MILVUS_USER:-} - MILVUS_PASSWORD: ${MILVUS_PASSWORD:-} - MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False} - MILVUS_ANALYZER_PARAMS: ${MILVUS_ANALYZER_PARAMS:-} - MYSCALE_HOST: ${MYSCALE_HOST:-myscale} - MYSCALE_PORT: ${MYSCALE_PORT:-8123} - MYSCALE_USER: ${MYSCALE_USER:-default} - MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-} - MYSCALE_DATABASE: 
${MYSCALE_DATABASE:-dify} - MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-} - COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server} - COUCHBASE_USER: ${COUCHBASE_USER:-Administrator} - COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password} - COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings} - COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default} - HOLOGRES_HOST: ${HOLOGRES_HOST:-} - HOLOGRES_PORT: ${HOLOGRES_PORT:-80} - HOLOGRES_DATABASE: ${HOLOGRES_DATABASE:-} - HOLOGRES_ACCESS_KEY_ID: ${HOLOGRES_ACCESS_KEY_ID:-} - HOLOGRES_ACCESS_KEY_SECRET: ${HOLOGRES_ACCESS_KEY_SECRET:-} - HOLOGRES_SCHEMA: ${HOLOGRES_SCHEMA:-public} - HOLOGRES_TOKENIZER: ${HOLOGRES_TOKENIZER:-jieba} - HOLOGRES_DISTANCE_METHOD: ${HOLOGRES_DISTANCE_METHOD:-Cosine} - HOLOGRES_BASE_QUANTIZATION_TYPE: ${HOLOGRES_BASE_QUANTIZATION_TYPE:-rabitq} - HOLOGRES_MAX_DEGREE: ${HOLOGRES_MAX_DEGREE:-64} - HOLOGRES_EF_CONSTRUCTION: ${HOLOGRES_EF_CONSTRUCTION:-400} - PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector} - PGVECTOR_PORT: ${PGVECTOR_PORT:-5432} - PGVECTOR_USER: ${PGVECTOR_USER:-postgres} - PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456} - PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify} - PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1} - PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5} - PGVECTOR_PG_BIGM: ${PGVECTOR_PG_BIGM:-false} - PGVECTOR_PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606} - VASTBASE_HOST: ${VASTBASE_HOST:-vastbase} - VASTBASE_PORT: ${VASTBASE_PORT:-5432} - VASTBASE_USER: ${VASTBASE_USER:-dify} - VASTBASE_PASSWORD: ${VASTBASE_PASSWORD:-Difyai123456} - VASTBASE_DATABASE: ${VASTBASE_DATABASE:-dify} - VASTBASE_MIN_CONNECTION: ${VASTBASE_MIN_CONNECTION:-1} - VASTBASE_MAX_CONNECTION: ${VASTBASE_MAX_CONNECTION:-5} - PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs} - PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432} - PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres} - PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456} - 
PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify} - ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak} - ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk} - ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou} - ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456} - ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount} - ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword} - ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify} - ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword} - ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com} - ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432} - ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1} - ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5} - TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb} - TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000} - TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-} - TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-} - TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify} - MATRIXONE_HOST: ${MATRIXONE_HOST:-matrixone} - MATRIXONE_PORT: ${MATRIXONE_PORT:-6001} - MATRIXONE_USER: ${MATRIXONE_USER:-dump} - MATRIXONE_PASSWORD: ${MATRIXONE_PASSWORD:-111} - MATRIXONE_DATABASE: ${MATRIXONE_DATABASE:-dify} - TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1} - TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify} - TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20} - TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false} - TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334} - TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify} - TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify} - TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1} - TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1} - TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1} - TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify} - TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100} - CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1} - CHROMA_PORT: ${CHROMA_PORT:-8000} - CHROMA_TENANT: 
${CHROMA_TENANT:-default_tenant} - CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database} - CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider} - CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-} - ORACLE_USER: ${ORACLE_USER:-dify} - ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify} - ORACLE_DSN: ${ORACLE_DSN:-oracle:1521/FREEPDB1} - ORACLE_CONFIG_DIR: ${ORACLE_CONFIG_DIR:-/app/api/storage/wallet} - ORACLE_WALLET_LOCATION: ${ORACLE_WALLET_LOCATION:-/app/api/storage/wallet} - ORACLE_WALLET_PASSWORD: ${ORACLE_WALLET_PASSWORD:-dify} - ORACLE_IS_AUTONOMOUS: ${ORACLE_IS_AUTONOMOUS:-false} - ALIBABACLOUD_MYSQL_HOST: ${ALIBABACLOUD_MYSQL_HOST:-127.0.0.1} - ALIBABACLOUD_MYSQL_PORT: ${ALIBABACLOUD_MYSQL_PORT:-3306} - ALIBABACLOUD_MYSQL_USER: ${ALIBABACLOUD_MYSQL_USER:-root} - ALIBABACLOUD_MYSQL_PASSWORD: ${ALIBABACLOUD_MYSQL_PASSWORD:-difyai123456} - ALIBABACLOUD_MYSQL_DATABASE: ${ALIBABACLOUD_MYSQL_DATABASE:-dify} - ALIBABACLOUD_MYSQL_MAX_CONNECTION: ${ALIBABACLOUD_MYSQL_MAX_CONNECTION:-5} - ALIBABACLOUD_MYSQL_HNSW_M: ${ALIBABACLOUD_MYSQL_HNSW_M:-6} - RELYT_HOST: ${RELYT_HOST:-db} - RELYT_PORT: ${RELYT_PORT:-5432} - RELYT_USER: ${RELYT_USER:-postgres} - RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456} - RELYT_DATABASE: ${RELYT_DATABASE:-postgres} - OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch} - OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200} - OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true} - OPENSEARCH_VERIFY_CERTS: ${OPENSEARCH_VERIFY_CERTS:-true} - OPENSEARCH_AUTH_METHOD: ${OPENSEARCH_AUTH_METHOD:-basic} - OPENSEARCH_USER: ${OPENSEARCH_USER:-admin} - OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin} - OPENSEARCH_AWS_REGION: ${OPENSEARCH_AWS_REGION:-ap-southeast-1} - OPENSEARCH_AWS_SERVICE: ${OPENSEARCH_AWS_SERVICE:-aoss} - TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1} - TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify} - TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30} - 
TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify} - TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify} - TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1} - TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2} - TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH: ${TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH:-false} - ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} - ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} - ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} - ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} - KIBANA_PORT: ${KIBANA_PORT:-5601} - ELASTICSEARCH_USE_CLOUD: ${ELASTICSEARCH_USE_CLOUD:-false} - ELASTICSEARCH_CLOUD_URL: ${ELASTICSEARCH_CLOUD_URL:-YOUR-ELASTICSEARCH_CLOUD_URL} - ELASTICSEARCH_API_KEY: ${ELASTICSEARCH_API_KEY:-YOUR-ELASTICSEARCH_API_KEY} - ELASTICSEARCH_VERIFY_CERTS: ${ELASTICSEARCH_VERIFY_CERTS:-False} - ELASTICSEARCH_CA_CERTS: ${ELASTICSEARCH_CA_CERTS:-} - ELASTICSEARCH_REQUEST_TIMEOUT: ${ELASTICSEARCH_REQUEST_TIMEOUT:-100000} - ELASTICSEARCH_RETRY_ON_TIMEOUT: ${ELASTICSEARCH_RETRY_ON_TIMEOUT:-True} - ELASTICSEARCH_MAX_RETRIES: ${ELASTICSEARCH_MAX_RETRIES:-10} - BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} - BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} - BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} - BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify} - BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify} - BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1} - BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3} - BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER: ${BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER:-DEFAULT_ANALYZER} - BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE: ${BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE:-COARSE_MODE} - BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT: ${BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT:-500} - BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO: 
${BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO:-0.05} - BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS: ${BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS:-300} - VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak} - VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk} - VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai} - VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com} - VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http} - VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30} - VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30} - LINDORM_URL: ${LINDORM_URL:-http://localhost:30070} - LINDORM_USERNAME: ${LINDORM_USERNAME:-admin} - LINDORM_PASSWORD: ${LINDORM_PASSWORD:-admin} - LINDORM_USING_UGC: ${LINDORM_USING_UGC:-True} - LINDORM_QUERY_TIMEOUT: ${LINDORM_QUERY_TIMEOUT:-1} - OPENGAUSS_HOST: ${OPENGAUSS_HOST:-opengauss} - OPENGAUSS_PORT: ${OPENGAUSS_PORT:-6600} - OPENGAUSS_USER: ${OPENGAUSS_USER:-postgres} - OPENGAUSS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123} - OPENGAUSS_DATABASE: ${OPENGAUSS_DATABASE:-dify} - OPENGAUSS_MIN_CONNECTION: ${OPENGAUSS_MIN_CONNECTION:-1} - OPENGAUSS_MAX_CONNECTION: ${OPENGAUSS_MAX_CONNECTION:-5} - OPENGAUSS_ENABLE_PQ: ${OPENGAUSS_ENABLE_PQ:-false} - HUAWEI_CLOUD_HOSTS: ${HUAWEI_CLOUD_HOSTS:-https://127.0.0.1:9200} - HUAWEI_CLOUD_USER: ${HUAWEI_CLOUD_USER:-admin} - HUAWEI_CLOUD_PASSWORD: ${HUAWEI_CLOUD_PASSWORD:-admin} - UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io} - UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify} - TABLESTORE_ENDPOINT: ${TABLESTORE_ENDPOINT:-https://instance-name.cn-hangzhou.ots.aliyuncs.com} - TABLESTORE_INSTANCE_NAME: ${TABLESTORE_INSTANCE_NAME:-instance-name} - TABLESTORE_ACCESS_KEY_ID: ${TABLESTORE_ACCESS_KEY_ID:-xxx} - TABLESTORE_ACCESS_KEY_SECRET: ${TABLESTORE_ACCESS_KEY_SECRET:-xxx} - TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE: ${TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE:-false} - CLICKZETTA_USERNAME: ${CLICKZETTA_USERNAME:-} - CLICKZETTA_PASSWORD: 
${CLICKZETTA_PASSWORD:-} - CLICKZETTA_INSTANCE: ${CLICKZETTA_INSTANCE:-} - CLICKZETTA_SERVICE: ${CLICKZETTA_SERVICE:-api.clickzetta.com} - CLICKZETTA_WORKSPACE: ${CLICKZETTA_WORKSPACE:-quick_start} - CLICKZETTA_VCLUSTER: ${CLICKZETTA_VCLUSTER:-default_ap} - CLICKZETTA_SCHEMA: ${CLICKZETTA_SCHEMA:-dify} - CLICKZETTA_BATCH_SIZE: ${CLICKZETTA_BATCH_SIZE:-100} - CLICKZETTA_ENABLE_INVERTED_INDEX: ${CLICKZETTA_ENABLE_INVERTED_INDEX:-true} - CLICKZETTA_ANALYZER_TYPE: ${CLICKZETTA_ANALYZER_TYPE:-chinese} - CLICKZETTA_ANALYZER_MODE: ${CLICKZETTA_ANALYZER_MODE:-smart} - CLICKZETTA_VECTOR_DISTANCE_FUNCTION: ${CLICKZETTA_VECTOR_DISTANCE_FUNCTION:-cosine_distance} - IRIS_HOST: ${IRIS_HOST:-iris} - IRIS_SUPER_SERVER_PORT: ${IRIS_SUPER_SERVER_PORT:-1972} - IRIS_WEB_SERVER_PORT: ${IRIS_WEB_SERVER_PORT:-52773} - IRIS_USER: ${IRIS_USER:-_SYSTEM} - IRIS_PASSWORD: ${IRIS_PASSWORD:-Dify@1234} - IRIS_DATABASE: ${IRIS_DATABASE:-USER} - IRIS_SCHEMA: ${IRIS_SCHEMA:-dify} - IRIS_CONNECTION_URL: ${IRIS_CONNECTION_URL:-} - IRIS_MIN_CONNECTION: ${IRIS_MIN_CONNECTION:-1} - IRIS_MAX_CONNECTION: ${IRIS_MAX_CONNECTION:-3} - IRIS_TEXT_INDEX: ${IRIS_TEXT_INDEX:-true} - IRIS_TEXT_INDEX_LANGUAGE: ${IRIS_TEXT_INDEX_LANGUAGE:-en} - IRIS_TIMEZONE: ${IRIS_TIMEZONE:-UTC} - UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} - UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} - UPLOAD_FILE_EXTENSION_BLACKLIST: ${UPLOAD_FILE_EXTENSION_BLACKLIST:-} - SINGLE_CHUNK_ATTACHMENT_LIMIT: ${SINGLE_CHUNK_ATTACHMENT_LIMIT:-10} - IMAGE_FILE_BATCH_LIMIT: ${IMAGE_FILE_BATCH_LIMIT:-10} - ATTACHMENT_IMAGE_FILE_SIZE_LIMIT: ${ATTACHMENT_IMAGE_FILE_SIZE_LIMIT:-2} - ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT: ${ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT:-60} - ETL_TYPE: ${ETL_TYPE:-dify} - UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-} - UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-} - SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true} - PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512} - CODE_GENERATION_MAX_TOKENS: 
${CODE_GENERATION_MAX_TOKENS:-1024} - PLUGIN_BASED_TOKEN_COUNTING_ENABLED: ${PLUGIN_BASED_TOKEN_COUNTING_ENABLED:-false} - MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64} - UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10} - UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100} - UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50} - SENTRY_DSN: ${SENTRY_DSN:-} - API_SENTRY_DSN: ${API_SENTRY_DSN:-} - API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} - API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} - WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-} - PLUGIN_SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false} - PLUGIN_SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-} - NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public} - NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-} - NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-} - NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-} - MAIL_TYPE: ${MAIL_TYPE:-} - MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-} - RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com} - RESEND_API_KEY: ${RESEND_API_KEY:-} - SMTP_SERVER: ${SMTP_SERVER:-} - SMTP_PORT: ${SMTP_PORT:-465} - SMTP_USERNAME: ${SMTP_USERNAME:-} - SMTP_PASSWORD: ${SMTP_PASSWORD:-} - SMTP_USE_TLS: ${SMTP_USE_TLS:-true} - SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false} - SMTP_LOCAL_HOSTNAME: ${SMTP_LOCAL_HOSTNAME:-} - SENDGRID_API_KEY: ${SENDGRID_API_KEY:-} - INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} - INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72} - RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5} - EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES: ${EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES:-5} - CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES: ${CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES:-5} - OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES: ${OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES:-5} - CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194} - 
CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox} - CODE_EXECUTION_SSL_VERIFY: ${CODE_EXECUTION_SSL_VERIFY:-True} - CODE_EXECUTION_POOL_MAX_CONNECTIONS: ${CODE_EXECUTION_POOL_MAX_CONNECTIONS:-100} - CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS: ${CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS:-20} - CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY: ${CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY:-5.0} - CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807} - CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808} - CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5} - CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20} - CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-400000} - CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30} - CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30} - CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000} - CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10} - CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60} - CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10} - TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-400000} - WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500} - WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200} - WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} - MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800} - WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10} - GRAPH_ENGINE_MIN_WORKERS: ${GRAPH_ENGINE_MIN_WORKERS:-1} - GRAPH_ENGINE_MAX_WORKERS: ${GRAPH_ENGINE_MAX_WORKERS:-10} - GRAPH_ENGINE_SCALE_UP_THRESHOLD: ${GRAPH_ENGINE_SCALE_UP_THRESHOLD:-3} - GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME: ${GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME:-5.0} - WORKFLOW_NODE_EXECUTION_STORAGE: ${WORKFLOW_NODE_EXECUTION_STORAGE:-rdbms} - CORE_WORKFLOW_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository} - 
CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository} - API_WORKFLOW_RUN_REPOSITORY: ${API_WORKFLOW_RUN_REPOSITORY:-repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository} - API_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${API_WORKFLOW_NODE_EXECUTION_REPOSITORY:-repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository} - WORKFLOW_LOG_CLEANUP_ENABLED: ${WORKFLOW_LOG_CLEANUP_ENABLED:-false} - WORKFLOW_LOG_RETENTION_DAYS: ${WORKFLOW_LOG_RETENTION_DAYS:-30} - WORKFLOW_LOG_CLEANUP_BATCH_SIZE: ${WORKFLOW_LOG_CLEANUP_BATCH_SIZE:-100} - WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS: ${WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS:-} - ALIYUN_SLS_ACCESS_KEY_ID: ${ALIYUN_SLS_ACCESS_KEY_ID:-} - ALIYUN_SLS_ACCESS_KEY_SECRET: ${ALIYUN_SLS_ACCESS_KEY_SECRET:-} - ALIYUN_SLS_ENDPOINT: ${ALIYUN_SLS_ENDPOINT:-} - ALIYUN_SLS_REGION: ${ALIYUN_SLS_REGION:-} - ALIYUN_SLS_PROJECT_NAME: ${ALIYUN_SLS_PROJECT_NAME:-} - ALIYUN_SLS_LOGSTORE_TTL: ${ALIYUN_SLS_LOGSTORE_TTL:-365} - LOGSTORE_DUAL_WRITE_ENABLED: ${LOGSTORE_DUAL_WRITE_ENABLED:-false} - LOGSTORE_DUAL_READ_ENABLED: ${LOGSTORE_DUAL_READ_ENABLED:-true} - LOGSTORE_ENABLE_PUT_GRAPH_FIELD: ${LOGSTORE_ENABLE_PUT_GRAPH_FIELD:-true} - HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} - HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} - HTTP_REQUEST_NODE_SSL_VERIFY: ${HTTP_REQUEST_NODE_SSL_VERIFY:-True} - HTTP_REQUEST_MAX_CONNECT_TIMEOUT: ${HTTP_REQUEST_MAX_CONNECT_TIMEOUT:-10} - HTTP_REQUEST_MAX_READ_TIMEOUT: ${HTTP_REQUEST_MAX_READ_TIMEOUT:-600} - HTTP_REQUEST_MAX_WRITE_TIMEOUT: ${HTTP_REQUEST_MAX_WRITE_TIMEOUT:-600} - WEBHOOK_REQUEST_BODY_MAX_SIZE: ${WEBHOOK_REQUEST_BODY_MAX_SIZE:-10485760} - RESPECT_XFORWARD_HEADERS_ENABLED: ${RESPECT_XFORWARD_HEADERS_ENABLED:-false} - 
SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} - SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} - LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} - MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} - MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} - MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99} - TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} - EXPERIMENTAL_ENABLE_VINEXT: ${EXPERIMENTAL_ENABLE_VINEXT:-false} - ALLOW_INLINE_STYLES: ${ALLOW_INLINE_STYLES:-false} - ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false} - MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50} - PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} - MYSQL_HOST_VOLUME: ${MYSQL_HOST_VOLUME:-./volumes/mysql/data} - SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} - SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release} - SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} - SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} - SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} - SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} - SANDBOX_PORT: ${SANDBOX_PORT:-8194} - WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} - WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} - WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true} - WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} - WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} - WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} - WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} - WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} - WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} - WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: 
${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} - WEAVIATE_DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false} - WEAVIATE_ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false} - WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false} - WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false} - CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} - CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} - CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} - ORACLE_PWD: ${ORACLE_PWD:-Dify123456} - ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} - ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} - ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} - ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} - ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} - MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} - MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} - ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} - MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} - MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true} - PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres} - PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} - PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} - PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} - OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} - OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} - OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m} - OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m} - OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} - OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1} - OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1} - 
OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536} - OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536} - NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} - NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} - NGINX_PORT: ${NGINX_PORT:-80} - NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} - NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} - NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} - NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.2 TLSv1.3} - NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} - NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M} - NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} - NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} - NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} - NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} - CERTBOT_EMAIL: ${CERTBOT_EMAIL:-} - CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-} - CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-} - SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128} - SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} - SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} - SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} - SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5} - SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5} - SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5} - SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5} - SSRF_POOL_MAX_CONNECTIONS: ${SSRF_POOL_MAX_CONNECTIONS:-100} - SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS: ${SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS:-20} - SSRF_POOL_KEEPALIVE_EXPIRY: ${SSRF_POOL_KEEPALIVE_EXPIRY:-5.0} - EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80} - EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443} - POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-} - POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-} - POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-} - POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-} - POSITION_PROVIDER_INCLUDES: 
${POSITION_PROVIDER_INCLUDES:-} - POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-} - CSP_WHITELIST: ${CSP_WHITELIST:-} - CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} - MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} - TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} - DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} - EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002} - PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002} - PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} - PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} - PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} - PLUGIN_MODEL_SCHEMA_CACHE_TTL: ${PLUGIN_MODEL_SCHEMA_CACHE_TTL:-3600} - PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} - PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} - PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} - EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} - EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} - PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} - PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} - ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} - MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} - MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} - CREATORS_PLATFORM_FEATURES_ENABLED: ${CREATORS_PLATFORM_FEATURES_ENABLED:-true} - CREATORS_PLATFORM_API_URL: ${CREATORS_PLATFORM_API_URL:-https://creators.dify.ai} - CREATORS_PLATFORM_OAUTH_CLIENT_ID: ${CREATORS_PLATFORM_OAUTH_CLIENT_ID:-} - FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} - ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES: ${ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES:-true} - PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024} - PLUGIN_STDIO_MAX_BUFFER_SIZE: 
${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880} - PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120} - PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600} - PLUGIN_DAEMON_TIMEOUT: ${PLUGIN_DAEMON_TIMEOUT:-600.0} - PIP_MIRROR_URL: ${PIP_MIRROR_URL:-} - PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local} - PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage} - PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd} - PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin} - PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages} - PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets} - PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-} - PLUGIN_S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false} - PLUGIN_S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false} - PLUGIN_S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-} - PLUGIN_S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false} - PLUGIN_AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-} - PLUGIN_AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-} - PLUGIN_AWS_REGION: ${PLUGIN_AWS_REGION:-} - PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-} - PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-} - PLUGIN_TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-} - PLUGIN_TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-} - PLUGIN_TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-} - PLUGIN_ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-} - PLUGIN_ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-} - PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-} - PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-} - PLUGIN_ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4} - PLUGIN_ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-} - PLUGIN_VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-} - PLUGIN_VOLCENGINE_TOS_ACCESS_KEY: 
${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-} - PLUGIN_VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-} - PLUGIN_VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-} - ENABLE_OTEL: ${ENABLE_OTEL:-false} - OTLP_TRACE_ENDPOINT: ${OTLP_TRACE_ENDPOINT:-} - OTLP_METRIC_ENDPOINT: ${OTLP_METRIC_ENDPOINT:-} - OTLP_BASE_ENDPOINT: ${OTLP_BASE_ENDPOINT:-http://localhost:4318} - OTLP_API_KEY: ${OTLP_API_KEY:-} - OTEL_EXPORTER_OTLP_PROTOCOL: ${OTEL_EXPORTER_OTLP_PROTOCOL:-} - OTEL_EXPORTER_TYPE: ${OTEL_EXPORTER_TYPE:-otlp} - OTEL_SAMPLING_RATE: ${OTEL_SAMPLING_RATE:-0.1} - OTEL_BATCH_EXPORT_SCHEDULE_DELAY: ${OTEL_BATCH_EXPORT_SCHEDULE_DELAY:-5000} - OTEL_MAX_QUEUE_SIZE: ${OTEL_MAX_QUEUE_SIZE:-2048} - OTEL_MAX_EXPORT_BATCH_SIZE: ${OTEL_MAX_EXPORT_BATCH_SIZE:-512} - OTEL_METRIC_EXPORT_INTERVAL: ${OTEL_METRIC_EXPORT_INTERVAL:-60000} - OTEL_BATCH_EXPORT_TIMEOUT: ${OTEL_BATCH_EXPORT_TIMEOUT:-10000} - OTEL_METRIC_EXPORT_TIMEOUT: ${OTEL_METRIC_EXPORT_TIMEOUT:-30000} - ALLOW_EMBED: ${ALLOW_EMBED:-false} - QUEUE_MONITOR_THRESHOLD: ${QUEUE_MONITOR_THRESHOLD:-200} - QUEUE_MONITOR_ALERT_EMAILS: ${QUEUE_MONITOR_ALERT_EMAILS:-} - QUEUE_MONITOR_INTERVAL: ${QUEUE_MONITOR_INTERVAL:-30} - SWAGGER_UI_ENABLED: ${SWAGGER_UI_ENABLED:-false} - SWAGGER_UI_PATH: ${SWAGGER_UI_PATH:-/swagger-ui.html} - DSL_EXPORT_ENCRYPT_DATASET_ID: ${DSL_EXPORT_ENCRYPT_DATASET_ID:-true} - DATASET_MAX_SEGMENTS_PER_REQUEST: ${DATASET_MAX_SEGMENTS_PER_REQUEST:-0} - ENABLE_CLEAN_EMBEDDING_CACHE_TASK: ${ENABLE_CLEAN_EMBEDDING_CACHE_TASK:-false} - ENABLE_CLEAN_UNUSED_DATASETS_TASK: ${ENABLE_CLEAN_UNUSED_DATASETS_TASK:-false} - ENABLE_CREATE_TIDB_SERVERLESS_TASK: ${ENABLE_CREATE_TIDB_SERVERLESS_TASK:-false} - ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK: ${ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK:-false} - ENABLE_CLEAN_MESSAGES: ${ENABLE_CLEAN_MESSAGES:-false} - ENABLE_WORKFLOW_RUN_CLEANUP_TASK: ${ENABLE_WORKFLOW_RUN_CLEANUP_TASK:-false} - ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK: 
${ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK:-false} - ENABLE_DATASETS_QUEUE_MONITOR: ${ENABLE_DATASETS_QUEUE_MONITOR:-false} - ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK: ${ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK:-true} - ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK: ${ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK:-true} - WORKFLOW_SCHEDULE_POLLER_INTERVAL: ${WORKFLOW_SCHEDULE_POLLER_INTERVAL:-1} - WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE: ${WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE:-100} - WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK: ${WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK:-0} - TENANT_ISOLATED_TASK_CONCURRENCY: ${TENANT_ISOLATED_TASK_CONCURRENCY:-1} - ANNOTATION_IMPORT_FILE_SIZE_LIMIT: ${ANNOTATION_IMPORT_FILE_SIZE_LIMIT:-2} - ANNOTATION_IMPORT_MAX_RECORDS: ${ANNOTATION_IMPORT_MAX_RECORDS:-10000} - ANNOTATION_IMPORT_MIN_RECORDS: ${ANNOTATION_IMPORT_MIN_RECORDS:-1} - ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE:-5} - ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR:-20} - ANNOTATION_IMPORT_MAX_CONCURRENT: ${ANNOTATION_IMPORT_MAX_CONCURRENT:-5} - AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-} - SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD: ${SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD:-21} - SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE:-1000} - SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL:-200} - SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS: ${SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS:-30} - EVENT_BUS_REDIS_URL: ${EVENT_BUS_REDIS_URL:-} - EVENT_BUS_REDIS_CHANNEL_TYPE: ${EVENT_BUS_REDIS_CHANNEL_TYPE:-pubsub} - EVENT_BUS_REDIS_USE_CLUSTERS: ${EVENT_BUS_REDIS_USE_CLUSTERS:-false} - ENABLE_HUMAN_INPUT_TIMEOUT_TASK: ${ENABLE_HUMAN_INPUT_TIMEOUT_TASK:-true} - HUMAN_INPUT_TIMEOUT_TASK_INTERVAL: ${HUMAN_INPUT_TIMEOUT_TASK_INTERVAL:-1} - SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL: ${SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL:-90000} +# Shared configuration using 
YAML anchors and env_file +x-shared-api-worker-config: &shared-api-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/api.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-config: &shared-worker-config + env_file: + - path: 
./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-beat-config: &shared-worker-beat-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker-beat.env 
+ required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always services: # Init container to fix permissions @@ -745,12 +225,9 @@ services: # API service api: + <<: *shared-api-worker-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. 
- <<: *shared-api-worker-env - # Startup mode, 'api' starts the API server. MODE: api SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -793,12 +270,9 @@ services: # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: + <<: *shared-worker-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker' starts the Celery worker for processing all queues. MODE: worker SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -839,12 +313,9 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: + <<: *shared-worker-beat-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks. 
MODE: beat depends_on: init_permissions: @@ -878,6 +349,12 @@ services: web: image: langgenius/dify-web:1.14.0 restart: always + env_file: + - path: ./envs/core-services/web.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} APP_API_URL: ${APP_API_URL:-} @@ -952,7 +429,7 @@ services: MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456} MYSQL_DATABASE: ${DB_DATABASE:-dify} command: > - --max_connections=1000 + --max_connections=${MYSQL_MAX_CONNECTIONS:-1000} --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M} --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} @@ -994,6 +471,12 @@ services: sandbox: image: langgenius/dify-sandbox:0.2.15 restart: always + env_file: + - path: ./envs/core-services/sandbox.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: # The DifySandbox configurations # Make sure you are changing this key for your deployment with a strong key. @@ -1018,9 +501,24 @@ services: plugin_daemon: image: langgenius/dify-plugin-daemon:0.6.0-local restart: always + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/plugin-daemon.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default environment: - # Use the shared environment variables. 
- <<: *shared-api-worker-env DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} DB_SSL_MODE: ${DB_SSL_MODE:-disable} SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} diff --git a/docker/envs/core-services/api.env.example b/docker/envs/core-services/api.env.example new file mode 100644 index 0000000000..1a3fc7a4ab --- /dev/null +++ b/docker/envs/core-services/api.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Api Configuration +# ------------------------------ + +MODE=api +SENTRY_DSN= +SENTRY_TRACES_SAMPLE_RATE=1.0 +SENTRY_PROFILES_SAMPLE_RATE=1.0 +PLUGIN_REMOTE_INSTALL_HOST=localhost +PLUGIN_REMOTE_INSTALL_PORT=5003 +PLUGIN_MAX_PACKAGE_SIZE=52428800 +PLUGIN_DAEMON_TIMEOUT=600.0 +INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 diff --git a/docker/envs/core-services/plugin-daemon.env.example b/docker/envs/core-services/plugin-daemon.env.example new file mode 100644 index 0000000000..c3b1bef974 --- /dev/null +++ b/docker/envs/core-services/plugin-daemon.env.example @@ -0,0 +1,23 @@ +# ------------------------------ +# Plugin Daemon Configuration +# ------------------------------ + +DB_PLUGIN_DATABASE=dify_plugin +PLUGIN_DAEMON_URL=http://plugin_daemon:5002 +PLUGIN_PPROF_ENABLED=false +PLUGIN_DIFY_INNER_API_URL=http://api:5001 +FORCE_VERIFYING_SIGNATURE=true +PLUGIN_STDIO_BUFFER_SIZE=1024 +PLUGIN_STDIO_MAX_BUFFER_SIZE=5242880 +PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120 +PLUGIN_MAX_EXECUTION_TIMEOUT=600 +PLUGIN_DEBUGGING_HOST=0.0.0.0 +PLUGIN_DEBUGGING_PORT=5003 +PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi +PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 +PLUGIN_DAEMON_PORT=5002 +CELERY_WORKER_CLASS= +PLUGIN_STORAGE_TYPE=local +PLUGIN_STORAGE_LOCAL_ROOT=/app/storage +PLUGIN_WORKING_PATH=/app/storage/cwd +PLUGIN_STORAGE_OSS_BUCKET= diff --git a/docker/envs/core-services/sandbox.env.example b/docker/envs/core-services/sandbox.env.example new file mode 100644 index 
0000000000..5d4ee6614b --- /dev/null +++ b/docker/envs/core-services/sandbox.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Sandbox Configuration +# ------------------------------ + +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +SANDBOX_PORT=8194 +PIP_MIRROR_URL= +SANDBOX_API_KEY=dify-sandbox +SANDBOX_GIN_MODE=release +SANDBOX_WORKER_TIMEOUT=15 +SANDBOX_ENABLE_NETWORK=true +SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21 +SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000 +SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL=200 +SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30 +SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL=90000 diff --git a/docker/envs/core-services/shared.env.example b/docker/envs/core-services/shared.env.example new file mode 100644 index 0000000000..2a57f6954a --- /dev/null +++ b/docker/envs/core-services/shared.env.example @@ -0,0 +1,469 @@ +# ------------------------------ +# Shared API/Worker Configuration +# ------------------------------ + +CONSOLE_WEB_URL= +SERVICE_API_URL= +TRIGGER_URL=http://localhost +APP_WEB_URL= +FILES_URL= +INTERNAL_FILES_URL= +LANG=C.UTF-8 +LC_ALL=C.UTF-8 +PYTHONIOENCODING=utf-8 +UV_CACHE_DIR=/tmp/.uv-cache +CHECK_UPDATE_URL=https://updates.dify.ai +OPENAI_API_BASE=https://api.openai.com/v1 +MIGRATION_ENABLED=true +FILES_ACCESS_TIMEOUT=300 +ENABLE_COLLABORATION_MODE=false +CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1 +CELERY_TASK_ANNOTATIONS=null +AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net +SUPABASE_URL=your-server-url +TIDB_ON_QDRANT_URL=http://127.0.0.1 +TIDB_ON_QDRANT_API_KEY=dify +TIDB_API_URL=http://127.0.0.1 +TIDB_IAM_API_URL=http://127.0.0.1 +TIDB_REGION=regions/aws-us-east-1 +TIDB_PROJECT_ID=dify +TIDB_SPEND_LIMIT=100 +TENCENT_VECTOR_DB_URL=http://127.0.0.1 +TENCENT_VECTOR_DB_API_KEY=dify +LINDORM_URL=http://localhost:30070 +LINDORM_USERNAME=admin +UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io +UPLOAD_FILE_SIZE_LIMIT=15 
+UPLOAD_FILE_BATCH_LIMIT=5 +UPLOAD_FILE_EXTENSION_BLACKLIST= +SINGLE_CHUNK_ATTACHMENT_LIMIT=10 +IMAGE_FILE_BATCH_LIMIT=10 +ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2 +ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60 +ETL_TYPE=dify +UNSTRUCTURED_API_URL= +MULTIMODAL_SEND_FORMAT=base64 +UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 +UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 +UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 +API_SENTRY_DSN= +API_SENTRY_TRACES_SAMPLE_RATE=1.0 +API_SENTRY_PROFILES_SAMPLE_RATE=1.0 +WEB_SENTRY_DSN= +PLUGIN_SENTRY_ENABLED=false +PLUGIN_SENTRY_DSN= +NOTION_INTEGRATION_TYPE=public +RESEND_API_URL=https://api.resend.com +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 +PGDATA=/var/lib/postgresql/data/pgdata +PLUGIN_MAX_PACKAGE_SIZE=52428800 +PLUGIN_MODEL_SCHEMA_CACHE_TTL=3600 +ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} +LOG_LEVEL=INFO +LOG_OUTPUT_FORMAT=text +LOG_FILE=/app/logs/server.log +LOG_FILE_MAX_SIZE=20 +LOG_FILE_BACKUP_COUNT=5 +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S +LOG_TZ=UTC +DEBUG=false +FLASK_DEBUG=false +ENABLE_REQUEST_LOGGING=False +WORKFLOW_LOG_CLEANUP_ENABLED=false +WORKFLOW_LOG_RETENTION_DAYS=30 +WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100 +WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS= +EXPOSE_PLUGIN_DEBUGGING_HOST=localhost +EXPOSE_PLUGIN_DEBUGGING_PORT=5003 +DEPLOY_ENV=PRODUCTION +ACCESS_TOKEN_EXPIRE_MINUTES=60 +REFRESH_TOKEN_EXPIRE_DAYS=30 +APP_DEFAULT_ACTIVE_REQUESTS=0 +APP_MAX_ACTIVE_REQUESTS=0 +APP_MAX_EXECUTION_TIME=1200 +DIFY_BIND_ADDRESS=0.0.0.0 +DIFY_PORT=5001 +SERVER_WORKER_AMOUNT=1 +SERVER_WORKER_CLASS=gevent +SERVER_WORKER_CONNECTIONS=10 +CELERY_SENTINEL_PASSWORD= +S3_ACCESS_KEY= +S3_SECRET_KEY= +ARCHIVE_STORAGE_ACCESS_KEY= +ARCHIVE_STORAGE_SECRET_KEY= +AZURE_BLOB_ACCOUNT_KEY=difyai +ALIYUN_OSS_ACCESS_KEY=your-access-key +ALIYUN_OSS_SECRET_KEY=your-secret-key +TENCENT_COS_SECRET_KEY=your-secret-key +TENCENT_COS_SECRET_ID=your-secret-id +OCI_ACCESS_KEY=your-access-key +OCI_SECRET_KEY=your-secret-key +HUAWEI_OBS_SECRET_KEY=your-secret-key 
+HUAWEI_OBS_ACCESS_KEY=your-access-key +VOLCENGINE_TOS_SECRET_KEY=your-secret-key +VOLCENGINE_TOS_ACCESS_KEY=your-access-key +BAIDU_OBS_SECRET_KEY=your-secret-key +BAIDU_OBS_ACCESS_KEY=your-access-key +SUPABASE_API_KEY=your-access-key +ALIBABACLOUD_MYSQL_PASSWORD=difyai123456 +RELYT_PASSWORD=difyai123456 +LINDORM_PASSWORD=admin +LINDORM_USING_UGC=True +LINDORM_QUERY_TIMEOUT=1 +HUAWEI_CLOUD_PASSWORD=admin +UPSTASH_VECTOR_TOKEN=dify +TABLESTORE_ACCESS_KEY_ID=xxx +TABLESTORE_ACCESS_KEY_SECRET=xxx +TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false +CLICKZETTA_PASSWORD= +CLICKZETTA_INSTANCE= +CLICKZETTA_SERVICE=api.clickzetta.com +CLICKZETTA_WORKSPACE=quick_start +CLICKZETTA_VCLUSTER=default_ap +CLICKZETTA_SCHEMA=dify +CLICKZETTA_BATCH_SIZE=100 +CLICKZETTA_ENABLE_INVERTED_INDEX=true +CLICKZETTA_ANALYZER_TYPE=chinese +CLICKZETTA_ANALYZER_MODE=smart +UNSTRUCTURED_API_KEY= +SCARF_NO_ANALYTICS=true +PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false +NOTION_CLIENT_SECRET= +NOTION_CLIENT_ID= +NOTION_INTERNAL_SECRET= +MAIL_TYPE=resend +MAIL_DEFAULT_SEND_FROM= +RESEND_API_KEY=your-resend-api-key +SMTP_SERVER= +SMTP_PORT=465 +SMTP_USERNAME= +SMTP_PASSWORD= +SMTP_USE_TLS=true +SMTP_OPPORTUNISTIC_TLS=false +SMTP_LOCAL_HOSTNAME= +SENDGRID_API_KEY= +INVITE_EXPIRY_HOURS=72 +RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 +EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5 +CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5 +OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5 +CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox +CODE_EXECUTION_SSL_VERIFY=True +CODE_EXECUTION_POOL_MAX_CONNECTIONS=100 +CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0 +CODE_MAX_NUMBER=9223372036854775807 +CODE_MIN_NUMBER=-9223372036854775808 +CODE_MAX_DEPTH=5 +CODE_MAX_PRECISION=20 +CODE_MAX_STRING_LENGTH=400000 +CODE_MAX_STRING_ARRAY_LENGTH=30 +CODE_MAX_OBJECT_ARRAY_LENGTH=30 +CODE_MAX_NUMBER_ARRAY_LENGTH=1000 +CODE_EXECUTION_CONNECT_TIMEOUT=10 +CODE_EXECUTION_READ_TIMEOUT=60 
+CODE_EXECUTION_WRITE_TIMEOUT=10 +TEMPLATE_TRANSFORM_MAX_LENGTH=400000 +WORKFLOW_MAX_EXECUTION_STEPS=500 +WORKFLOW_MAX_EXECUTION_TIME=1200 +WORKFLOW_CALL_MAX_DEPTH=5 +MAX_VARIABLE_SIZE=204800 +WORKFLOW_FILE_UPLOAD_LIMIT=10 +GRAPH_ENGINE_MIN_WORKERS=1 +GRAPH_ENGINE_MAX_WORKERS=10 +GRAPH_ENGINE_SCALE_UP_THRESHOLD=3 +GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0 +ALIYUN_SLS_ACCESS_KEY_ID= +ALIYUN_SLS_ACCESS_KEY_SECRET= +WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760 +RESPECT_XFORWARD_HEADERS_ENABLED=false +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 +SSRF_POOL_MAX_CONNECTIONS=100 +SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +SSRF_POOL_KEEPALIVE_EXPIRY=5.0 +PLUGIN_AWS_ACCESS_KEY= +PLUGIN_AWS_SECRET_KEY= +PLUGIN_AWS_REGION= +PLUGIN_TENCENT_COS_SECRET_KEY= +PLUGIN_TENCENT_COS_SECRET_ID= +PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID= +PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET= +PLUGIN_VOLCENGINE_TOS_ACCESS_KEY= +PLUGIN_VOLCENGINE_TOS_SECRET_KEY= +OTLP_API_KEY= +OTEL_EXPORTER_OTLP_PROTOCOL= +OTEL_EXPORTER_TYPE=otlp +OTEL_SAMPLING_RATE=0.1 +OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000 +OTEL_MAX_QUEUE_SIZE=2048 +OTEL_MAX_EXPORT_BATCH_SIZE=512 +OTEL_METRIC_EXPORT_INTERVAL=60000 +OTEL_BATCH_EXPORT_TIMEOUT=10000 +OTEL_METRIC_EXPORT_TIMEOUT=30000 +QUEUE_MONITOR_THRESHOLD=200 +QUEUE_MONITOR_ALERT_EMAILS= +QUEUE_MONITOR_INTERVAL=30 +SWAGGER_UI_ENABLED=false +SWAGGER_UI_PATH=/swagger-ui.html +DSL_EXPORT_ENCRYPT_DATASET_ID=true +DATASET_MAX_SEGMENTS_PER_REQUEST=0 +ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false +ENABLE_CLEAN_UNUSED_DATASETS_TASK=false +ENABLE_CREATE_TIDB_SERVERLESS_TASK=false +ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false +ENABLE_CLEAN_MESSAGES=false +ENABLE_WORKFLOW_RUN_CLEANUP_TASK=false +ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false +ENABLE_DATASETS_QUEUE_MONITOR=false +ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true 
+ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK=true +WORKFLOW_SCHEDULE_POLLER_INTERVAL=1 +WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE=100 +WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0 +TENANT_ISOLATED_TASK_CONCURRENCY=1 +ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2 +ANNOTATION_IMPORT_MAX_RECORDS=10000 +ANNOTATION_IMPORT_MIN_RECORDS=1 +ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5 +ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20 +ANNOTATION_IMPORT_MAX_CONCURRENT=5 +CREATORS_PLATFORM_FEATURES_ENABLED=true +CREATORS_PLATFORM_API_URL=https://creators.dify.ai +CREATORS_PLATFORM_OAUTH_CLIENT_ID= +TIDB_VECTOR_DATABASE=dify +ALIBABACLOUD_MYSQL_HOST=127.0.0.1 +ALIBABACLOUD_MYSQL_PORT=3306 +ALIBABACLOUD_MYSQL_USER=root +ALIBABACLOUD_MYSQL_DATABASE=dify +ALIBABACLOUD_MYSQL_MAX_CONNECTION=5 +ALIBABACLOUD_MYSQL_HNSW_M=6 +RELYT_DATABASE=postgres +TENCENT_VECTOR_DB_DATABASE=dify +BAIDU_VECTOR_DB_DATABASE=dify +EXPOSE_PLUGIN_DAEMON_PORT=5002 +GUNICORN_TIMEOUT=360 +CELERY_WORKER_AMOUNT= +CELERY_AUTO_SCALE=false +CELERY_MAX_WORKERS= +CELERY_MIN_WORKERS= +API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 +API_TOOL_DEFAULT_READ_TIMEOUT=60 +CELERY_BACKEND=redis +CELERY_USE_SENTINEL=false +CELERY_SENTINEL_MASTER_NAME= +CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 +WEB_API_CORS_ALLOW_ORIGINS=* +CONSOLE_CORS_ALLOW_ORIGINS=* +COOKIE_DOMAIN= +OPENDAL_SCHEME=fs +OPENDAL_FS_ROOT=storage +CLICKZETTA_VOLUME_TYPE=user +CLICKZETTA_VOLUME_NAME= +CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_ +CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km +S3_ENDPOINT= +S3_REGION=us-east-1 +S3_BUCKET_NAME=difyai +S3_ADDRESS_STYLE=auto +S3_USE_AWS_MANAGED_IAM=false +ARCHIVE_STORAGE_ENABLED=false +ARCHIVE_STORAGE_ENDPOINT= +ARCHIVE_STORAGE_ARCHIVE_BUCKET= +ARCHIVE_STORAGE_EXPORT_BUCKET= +ARCHIVE_STORAGE_REGION=auto +AZURE_BLOB_ACCOUNT_NAME=difyai +AZURE_BLOB_CONTAINER_NAME=difyai-container +GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name +GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= +ALIYUN_OSS_BUCKET_NAME=your-bucket-name +ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com 
+ALIYUN_OSS_REGION=ap-southeast-1 +ALIYUN_OSS_AUTH_VERSION=v4 +ALIYUN_OSS_PATH=your-path +ALIYUN_CLOUDBOX_ID=your-cloudbox-id +TENCENT_COS_BUCKET_NAME=your-bucket-name +TENCENT_COS_REGION=your-region +TENCENT_COS_SCHEME=your-scheme +TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain +OCI_ENDPOINT=https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com +OCI_BUCKET_NAME=your-bucket-name +OCI_REGION=us-ashburn-1 +HUAWEI_OBS_BUCKET_NAME=your-bucket-name +HUAWEI_OBS_SERVER=your-server-url +HUAWEI_OBS_PATH_STYLE=false +VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name +VOLCENGINE_TOS_ENDPOINT=your-server-url +VOLCENGINE_TOS_REGION=your-region +BAIDU_OBS_BUCKET_NAME=your-bucket-name +BAIDU_OBS_ENDPOINT=your-server-url +SUPABASE_BUCKET_NAME=your-bucket-name +TENCENT_VECTOR_DB_TIMEOUT=30 +TENCENT_VECTOR_DB_USERNAME=dify +TENCENT_VECTOR_DB_SHARD=1 +TENCENT_VECTOR_DB_REPLICAS=2 +TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false +BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 +BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 +BAIDU_VECTOR_DB_ACCOUNT=root +BAIDU_VECTOR_DB_API_KEY=dify +BAIDU_VECTOR_DB_SHARD=1 +BAIDU_VECTOR_DB_REPLICAS=3 +BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER +BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE +BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT=500 +BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO=0.05 +BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS=300 +HUAWEI_CLOUD_HOSTS=https://127.0.0.1:9200 +HUAWEI_CLOUD_USER=admin +WORKFLOW_NODE_EXECUTION_STORAGE=rdbms +CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository +CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository +API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository 
+API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository +ALIYUN_SLS_ENDPOINT= +ALIYUN_SLS_REGION= +ALIYUN_SLS_PROJECT_NAME= +ALIYUN_SLS_LOGSTORE_TTL=365 +LOGSTORE_DUAL_WRITE_ENABLED=false +LOGSTORE_DUAL_READ_ENABLED=true +LOGSTORE_ENABLE_PUT_GRAPH_FIELD=true +HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 +HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 +HTTP_REQUEST_NODE_SSL_VERIFY=True +HTTP_REQUEST_MAX_CONNECT_TIMEOUT=10 +HTTP_REQUEST_MAX_READ_TIMEOUT=600 +HTTP_REQUEST_MAX_WRITE_TIMEOUT=600 +PLUGIN_INSTALLED_PATH=plugin +PLUGIN_PACKAGE_CACHE_PATH=plugin_packages +PLUGIN_MEDIA_CACHE_PATH=assets +PLUGIN_S3_USE_AWS=false +PLUGIN_S3_USE_AWS_MANAGED_IAM=false +PLUGIN_S3_ENDPOINT= +PLUGIN_S3_USE_PATH_STYLE=false +PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME= +PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING= +PLUGIN_TENCENT_COS_REGION= +PLUGIN_ALIYUN_OSS_REGION= +PLUGIN_ALIYUN_OSS_ENDPOINT= +PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4 +PLUGIN_ALIYUN_OSS_PATH= +PLUGIN_VOLCENGINE_TOS_ENDPOINT= +PLUGIN_VOLCENGINE_TOS_REGION= +ENABLE_OTEL=false +OTLP_TRACE_ENDPOINT= +OTLP_METRIC_ENDPOINT= +# Base endpoint of the OTLP collector (used when trace/metric endpoints are unset) +OTLP_BASE_ENDPOINT=http://localhost:4318 +WEAVIATE_GRPC_ENDPOINT=grpc://weaviate:50051 +ANALYTICDB_KEY_ID=your-ak +ANALYTICDB_KEY_SECRET=your-sk +ANALYTICDB_REGION_ID=cn-hangzhou +ANALYTICDB_INSTANCE_ID=gp-ab123456 +ANALYTICDB_ACCOUNT=testaccount +ANALYTICDB_PASSWORD=testpassword +ANALYTICDB_NAMESPACE=dify +ANALYTICDB_NAMESPACE_PASSWORD=difypassword +ANALYTICDB_HOST=gp-test.aliyuncs.com +ANALYTICDB_PORT=5432 +ANALYTICDB_MIN_CONNECTION=1 +ANALYTICDB_MAX_CONNECTION=5 +TIDB_VECTOR_HOST=tidb +TIDB_VECTOR_PORT=4000 +TIDB_VECTOR_USER= +TIDB_VECTOR_PASSWORD= +TIDB_ON_QDRANT_CLIENT_TIMEOUT=20 +TIDB_ON_QDRANT_GRPC_ENABLED=false +TIDB_ON_QDRANT_GRPC_PORT=6334 +TIDB_PUBLIC_KEY=dify +TIDB_PRIVATE_KEY=dify +RELYT_HOST=db +RELYT_PORT=5432 +RELYT_USER=postgres
+VIKINGDB_ACCESS_KEY=your-ak +VIKINGDB_SECRET_KEY=your-sk +VIKINGDB_REGION=cn-shanghai +VIKINGDB_HOST=api-vikingdb.xxx.volces.com +VIKINGDB_SCHEME=http +VIKINGDB_CONNECTION_TIMEOUT=30 +VIKINGDB_SOCKET_TIMEOUT=30 +TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com +TABLESTORE_INSTANCE_NAME=instance-name +CLICKZETTA_USERNAME= +CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance +COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql} +EXPOSE_NGINX_PORT=80 +EXPOSE_NGINX_SSL_PORT=443 +POSITION_TOOL_PINS= +POSITION_TOOL_INCLUDES= +POSITION_TOOL_EXCLUDES= +POSITION_PROVIDER_PINS= +POSITION_PROVIDER_INCLUDES= +POSITION_PROVIDER_EXCLUDES= +CREATE_TIDB_SERVICE_JOB_ENABLED=false +MAX_SUBMIT_COUNT=100 + +# Vector Store Configuration +STORAGE_TYPE=opendal +VECTOR_STORE=weaviate +VECTOR_INDEX_NAME_PREFIX=Vector_index +WEAVIATE_ENDPOINT=http://weaviate:8080 +WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_TOKENIZATION=word +OCEANBASE_VECTOR_HOST=oceanbase +OCEANBASE_VECTOR_PORT=2881 +OCEANBASE_VECTOR_USER=root@test +OCEANBASE_VECTOR_PASSWORD=difyai123456 +OCEANBASE_VECTOR_DATABASE=test +OCEANBASE_ENABLE_HYBRID_SEARCH=false +OCEANBASE_FULLTEXT_PARSER=ik +SEEKDB_MEMORY_LIMIT=2G +QDRANT_URL=http://qdrant:6333 +QDRANT_API_KEY=difyai123456 +QDRANT_CLIENT_TIMEOUT=20 +QDRANT_GRPC_ENABLED=false +QDRANT_GRPC_PORT=6334 +QDRANT_REPLICATION_FACTOR=1 +MILVUS_URI=http://host.docker.internal:19530 +MILVUS_TOKEN= +MILVUS_USER= +MILVUS_PASSWORD= +MILVUS_ANALYZER_PARAMS= +PGVECTOR_HOST=pgvector +PGVECTOR_PORT=5432 +PGVECTOR_USER=postgres +PGVECTOR_PASSWORD=difyai123456 +PGVECTOR_DATABASE=dify +PGVECTOR_MIN_CONNECTION=1 +PGVECTOR_MAX_CONNECTION=5 +PGVECTOR_PG_BIGM=false +PGVECTOR_PG_BIGM_VERSION=1.2-20240606 + +# Hologres Configuration +HOLOGRES_HOST= +HOLOGRES_PORT=80 +HOLOGRES_DATABASE= +HOLOGRES_ACCESS_KEY_ID= +HOLOGRES_ACCESS_KEY_SECRET= +HOLOGRES_SCHEMA=public +HOLOGRES_TOKENIZER=jieba +HOLOGRES_DISTANCE_METHOD=Cosine 
+HOLOGRES_BASE_QUANTIZATION_TYPE=rabitq +HOLOGRES_MAX_DEGREE=64 +HOLOGRES_EF_CONSTRUCTION=400 + +# Milvus API Configuration +MILVUS_DATABASE= +MILVUS_ENABLE_HYBRID_SEARCH=False + +# Human Input Task Configuration +ENABLE_HUMAN_INPUT_TIMEOUT_TASK=true +HUMAN_INPUT_TIMEOUT_TASK_INTERVAL=1 diff --git a/docker/envs/core-services/web.env.example b/docker/envs/core-services/web.env.example new file mode 100644 index 0000000000..d366cd87ba --- /dev/null +++ b/docker/envs/core-services/web.env.example @@ -0,0 +1,30 @@ +# ------------------------------ +# Web Configuration +# ------------------------------ + +CONSOLE_API_URL= +APP_API_URL= +SENTRY_DSN= +NEXT_PUBLIC_SOCKET_URL=ws://localhost +EXPERIMENTAL_ENABLE_VINEXT=false +LOOP_NODE_MAX_COUNT=100 +MAX_TOOLS_NUM=10 +MAX_PARALLEL_LIMIT=10 +MAX_ITERATIONS_NUM=99 +TEXT_GENERATION_TIMEOUT_MS=60000 +ALLOW_INLINE_STYLES=false +ALLOW_UNSAFE_DATA_SCHEME=false +MAX_TREE_DEPTH=50 +MARKETPLACE_ENABLED=true +MARKETPLACE_API_URL=https://marketplace.dify.ai +INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 +ALLOW_EMBED=false +AMPLITUDE_API_KEY= +ENABLE_WEBSITE_JINAREADER=true +ENABLE_WEBSITE_FIRECRAWL=true +ENABLE_WEBSITE_WATERCRAWL=true +NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false +NEXT_PUBLIC_COOKIE_DOMAIN= +NEXT_PUBLIC_BATCH_CONCURRENCY=5 +CSP_WHITELIST= +TOP_K_MAX_VALUE=10 diff --git a/docker/envs/core-services/worker-beat.env.example b/docker/envs/core-services/worker-beat.env.example new file mode 100644 index 0000000000..380fe02b68 --- /dev/null +++ b/docker/envs/core-services/worker-beat.env.example @@ -0,0 +1,8 @@ +# ------------------------------ +# Worker Beat Configuration +# ------------------------------ + +MODE=beat +COMPOSE_WORKER_HEALTHCHECK_DISABLED=true +COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s +COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s diff --git a/docker/envs/core-services/worker.env.example b/docker/envs/core-services/worker.env.example new file mode 100644 index 0000000000..58cf4ea901 --- /dev/null +++ 
b/docker/envs/core-services/worker.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Worker Configuration +# ------------------------------ + +MODE=worker +SENTRY_DSN= +SENTRY_TRACES_SAMPLE_RATE=1.0 +SENTRY_PROFILES_SAMPLE_RATE=1.0 +PLUGIN_MAX_PACKAGE_SIZE=52428800 +INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 +COMPOSE_WORKER_HEALTHCHECK_DISABLED=true +COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s +COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s diff --git a/docker/envs/databases/db-mysql.env.example b/docker/envs/databases/db-mysql.env.example new file mode 100644 index 0000000000..b3ea6801fe --- /dev/null +++ b/docker/envs/databases/db-mysql.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Db Mysql Configuration +# ------------------------------ + +MYSQL_INNODB_LOG_FILE_SIZE=128M +MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2 +MYSQL_MAX_CONNECTIONS=1000 +MYSQL_INNODB_BUFFER_POOL_SIZE=512M +MYSQL_HOST_VOLUME=./volumes/mysql/data diff --git a/docker/envs/databases/db-postgres.env.example b/docker/envs/databases/db-postgres.env.example new file mode 100644 index 0000000000..14cefb6bee --- /dev/null +++ b/docker/envs/databases/db-postgres.env.example @@ -0,0 +1,26 @@ +# ------------------------------ +# Db Postgres Configuration +# ------------------------------ + +PGDATA=/var/lib/postgresql/data/pgdata +DB_TYPE=postgresql +DB_USERNAME=postgres +DB_PASSWORD=difyai123456 +DB_HOST=db_postgres +DB_PORT=5432 +DB_DATABASE=dify +SQLALCHEMY_POOL_SIZE=30 +SQLALCHEMY_MAX_OVERFLOW=10 +SQLALCHEMY_POOL_RECYCLE=3600 +SQLALCHEMY_ECHO=false +SQLALCHEMY_POOL_PRE_PING=false +SQLALCHEMY_POOL_USE_LIFO=false +SQLALCHEMY_POOL_TIMEOUT=30 +SQLALCHEMY_POOL_RESET_ON_RETURN=rollback +POSTGRES_MAX_CONNECTIONS=100 +POSTGRES_SHARED_BUFFERS=128MB +POSTGRES_WORK_MEM=4MB +POSTGRES_MAINTENANCE_WORK_MEM=64MB +POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB +POSTGRES_STATEMENT_TIMEOUT=0 +POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0 diff --git 
a/docker/envs/databases/redis.env.example b/docker/envs/databases/redis.env.example new file mode 100644 index 0000000000..74bcb6525e --- /dev/null +++ b/docker/envs/databases/redis.env.example @@ -0,0 +1,35 @@ +# ------------------------------ +# Redis Configuration +# ------------------------------ + +REDIS_HOST=redis +REDIS_PORT=6379 +REDIS_USERNAME= +REDIS_PASSWORD=difyai123456 +REDIS_USE_SSL=false +REDIS_SSL_CERT_REQS=CERT_NONE +REDIS_SSL_CA_CERTS= +REDIS_SSL_CERTFILE= +REDIS_SSL_KEYFILE= +REDIS_DB=0 +REDIS_KEY_PREFIX= +REDIS_MAX_CONNECTIONS= +REDIS_USE_SENTINEL=false +REDIS_SENTINELS= +REDIS_SENTINEL_SERVICE_NAME= +REDIS_SENTINEL_USERNAME= +REDIS_SENTINEL_PASSWORD= +REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 +REDIS_USE_CLUSTERS=false +REDIS_CLUSTERS= +REDIS_CLUSTERS_PASSWORD= +REDIS_RETRY_RETRIES=3 +REDIS_RETRY_BACKOFF_BASE=1.0 +REDIS_RETRY_BACKOFF_CAP=10.0 +REDIS_SOCKET_TIMEOUT=5.0 +REDIS_SOCKET_CONNECT_TIMEOUT=5.0 +REDIS_HEALTH_CHECK_INTERVAL=30 +EVENT_BUS_REDIS_URL= +EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub +EVENT_BUS_REDIS_USE_CLUSTERS=false +BROKER_USE_SSL=false diff --git a/docker/envs/infrastructure/certbot.env.example b/docker/envs/infrastructure/certbot.env.example new file mode 100644 index 0000000000..c654fbe02f --- /dev/null +++ b/docker/envs/infrastructure/certbot.env.example @@ -0,0 +1,7 @@ +# ------------------------------ +# Certbot Configuration +# ------------------------------ + +CERTBOT_EMAIL=your_email@example.com +CERTBOT_DOMAIN=your_domain.com +CERTBOT_OPTIONS= diff --git a/docker/envs/infrastructure/etcd.env.example b/docker/envs/infrastructure/etcd.env.example new file mode 100644 index 0000000000..4dca26671a --- /dev/null +++ b/docker/envs/infrastructure/etcd.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Etcd Configuration +# ------------------------------ + diff --git a/docker/envs/infrastructure/milvus-standalone.env.example b/docker/envs/infrastructure/milvus-standalone.env.example new file mode 100644 index 
0000000000..7e87ed2648 --- /dev/null +++ b/docker/envs/infrastructure/milvus-standalone.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Milvus Standalone Configuration +# ------------------------------ + diff --git a/docker/envs/infrastructure/minio.env.example b/docker/envs/infrastructure/minio.env.example new file mode 100644 index 0000000000..7c8e1fa35a --- /dev/null +++ b/docker/envs/infrastructure/minio.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Minio Configuration +# ------------------------------ + diff --git a/docker/envs/infrastructure/nginx.env.example b/docker/envs/infrastructure/nginx.env.example new file mode 100644 index 0000000000..fbe86680ba --- /dev/null +++ b/docker/envs/infrastructure/nginx.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Nginx Configuration +# ------------------------------ + +NGINX_SERVER_NAME=_ +NGINX_HTTPS_ENABLED=false +NGINX_PORT=80 +NGINX_SSL_PORT=443 +NGINX_SSL_CERT_FILENAME=dify.crt +NGINX_SSL_CERT_KEY_FILENAME=dify.key +NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3 +NGINX_WORKER_PROCESSES=auto +NGINX_CLIENT_MAX_BODY_SIZE=100M +NGINX_KEEPALIVE_TIMEOUT=65 +NGINX_PROXY_READ_TIMEOUT=3600s +NGINX_PROXY_SEND_TIMEOUT=3600s +NGINX_ENABLE_CERTBOT_CHALLENGE=false diff --git a/docker/envs/infrastructure/ssrf-proxy.env.example b/docker/envs/infrastructure/ssrf-proxy.env.example new file mode 100644 index 0000000000..210a782494 --- /dev/null +++ b/docker/envs/infrastructure/ssrf-proxy.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Ssrf Proxy Configuration +# ------------------------------ + +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 +SSRF_POOL_MAX_CONNECTIONS=100 
+SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +SSRF_POOL_KEEPALIVE_EXPIRY=5.0 diff --git a/docker/middleware.env.example b/docker/envs/middleware.env.example similarity index 100% rename from docker/middleware.env.example rename to docker/envs/middleware.env.example diff --git a/docker/envs/security.env.example b/docker/envs/security.env.example new file mode 100644 index 0000000000..787aef2706 --- /dev/null +++ b/docker/envs/security.env.example @@ -0,0 +1,40 @@ +# ------------------------------ +# Security Configuration +# ------------------------------ + +TIDB_ON_QDRANT_API_KEY=dify +TENCENT_VECTOR_DB_API_KEY=dify +ALIBABACLOUD_MYSQL_PASSWORD=difyai123456 +RELYT_PASSWORD=difyai123456 +LINDORM_PASSWORD=admin +HUAWEI_CLOUD_PASSWORD=admin +UPSTASH_VECTOR_TOKEN=dify +TABLESTORE_ACCESS_KEY_ID=xxx +TABLESTORE_ACCESS_KEY_SECRET=xxx +UNSTRUCTURED_API_KEY= +PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false +NOTION_CLIENT_SECRET= +NOTION_INTERNAL_SECRET= +RESEND_API_KEY=your-resend-api-key +SMTP_PASSWORD= +SENDGRID_API_KEY= +RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 +EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5 +CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5 +OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5 +CODE_EXECUTION_API_KEY=dify-sandbox +ALIYUN_SLS_ACCESS_KEY_ID= +ALIYUN_SLS_ACCESS_KEY_SECRET= +OTLP_API_KEY= +BAIDU_VECTOR_DB_API_KEY=dify +ANALYTICDB_KEY_ID=your-ak +ANALYTICDB_KEY_SECRET=your-sk +ANALYTICDB_PASSWORD=testpassword +ANALYTICDB_NAMESPACE_PASSWORD=difypassword +TIDB_VECTOR_PASSWORD= +TIDB_PUBLIC_KEY=dify +TIDB_PRIVATE_KEY=dify +VIKINGDB_ACCESS_KEY=your-ak +VIKINGDB_SECRET_KEY=your-sk +SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U +INIT_PASSWORD= diff --git a/docker/envs/vectorstores/chroma.env.example b/docker/envs/vectorstores/chroma.env.example new file mode 100644 index 0000000000..2a15375a3d --- /dev/null +++ b/docker/envs/vectorstores/chroma.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Chroma Configuration +# ------------------------------ + 
+CHROMA_DATABASE=default_database +CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider +CHROMA_AUTH_CREDENTIALS= +CHROMA_HOST=127.0.0.1 +CHROMA_PORT=8000 +CHROMA_TENANT=default_tenant +CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456 +CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider +CHROMA_IS_PERSISTENT=TRUE diff --git a/docker/envs/vectorstores/couchbase.env.example b/docker/envs/vectorstores/couchbase.env.example new file mode 100644 index 0000000000..4329d9c723 --- /dev/null +++ b/docker/envs/vectorstores/couchbase.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Couchbase Configuration +# ------------------------------ + +COUCHBASE_PASSWORD=password +COUCHBASE_BUCKET_NAME=Embeddings +COUCHBASE_SCOPE_NAME=_default +COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server +COUCHBASE_USER=Administrator diff --git a/docker/envs/vectorstores/elasticsearch.env.example b/docker/envs/vectorstores/elasticsearch.env.example new file mode 100644 index 0000000000..2aaa965cd7 --- /dev/null +++ b/docker/envs/vectorstores/elasticsearch.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Elasticsearch Configuration +# ------------------------------ + +ELASTICSEARCH_CLOUD_URL=YOUR-ELASTICSEARCH_CLOUD_URL +ELASTICSEARCH_PASSWORD=elastic +KIBANA_PORT=5601 +ELASTICSEARCH_USE_CLOUD=false +ELASTICSEARCH_API_KEY=YOUR-ELASTICSEARCH_API_KEY +ELASTICSEARCH_VERIFY_CERTS=False +ELASTICSEARCH_CA_CERTS= +ELASTICSEARCH_REQUEST_TIMEOUT=100000 +ELASTICSEARCH_RETRY_ON_TIMEOUT=True +ELASTICSEARCH_MAX_RETRIES=10 +ELASTICSEARCH_HOST=0.0.0.0 +ELASTICSEARCH_PORT=9200 +ELASTICSEARCH_USERNAME=elastic diff --git a/docker/envs/vectorstores/iris.env.example b/docker/envs/vectorstores/iris.env.example new file mode 100644 index 0000000000..b1eb39bff8 --- /dev/null +++ b/docker/envs/vectorstores/iris.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Iris Configuration +# ------------------------------ + 
+IRIS_CONNECTION_URL= +IRIS_MIN_CONNECTION=1 +IRIS_MAX_CONNECTION=3 +IRIS_TEXT_INDEX=true +IRIS_TEXT_INDEX_LANGUAGE=en +IRIS_TIMEZONE=UTC +IRIS_PASSWORD=Dify@1234 +IRIS_DATABASE=USER +IRIS_SCHEMA=dify +IRIS_HOST=iris +IRIS_SUPER_SERVER_PORT=1972 +IRIS_WEB_SERVER_PORT=52773 +IRIS_USER=_SYSTEM diff --git a/docker/envs/vectorstores/matrixone.env.example b/docker/envs/vectorstores/matrixone.env.example new file mode 100644 index 0000000000..931375f8b4 --- /dev/null +++ b/docker/envs/vectorstores/matrixone.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Matrixone Configuration +# ------------------------------ + +MATRIXONE_PASSWORD=111 +MATRIXONE_HOST=matrixone +MATRIXONE_PORT=6001 +MATRIXONE_USER=dump +MATRIXONE_DATABASE=dify diff --git a/docker/envs/vectorstores/milvus.env.example b/docker/envs/vectorstores/milvus.env.example new file mode 100644 index 0000000000..d16879ca7b --- /dev/null +++ b/docker/envs/vectorstores/milvus.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Milvus Configuration +# ------------------------------ + +MINIO_ACCESS_KEY=minioadmin +MINIO_SECRET_KEY=minioadmin +ETCD_ENDPOINTS=etcd:2379 +MINIO_ADDRESS=minio:9000 +ETCD_AUTO_COMPACTION_MODE=revision +ETCD_AUTO_COMPACTION_RETENTION=1000 +ETCD_QUOTA_BACKEND_BYTES=4294967296 +ETCD_SNAPSHOT_COUNT=50000 +MILVUS_AUTHORIZATION_ENABLED=true diff --git a/docker/envs/vectorstores/myscale.env.example b/docker/envs/vectorstores/myscale.env.example new file mode 100644 index 0000000000..eaa9e88cc0 --- /dev/null +++ b/docker/envs/vectorstores/myscale.env.example @@ -0,0 +1,10 @@ +# ------------------------------ +# Myscale Configuration +# ------------------------------ + +MYSCALE_PASSWORD= +MYSCALE_DATABASE=dify +MYSCALE_FTS_PARAMS= +MYSCALE_HOST=myscale +MYSCALE_PORT=8123 +MYSCALE_USER=default diff --git a/docker/envs/vectorstores/oceanbase.env.example b/docker/envs/vectorstores/oceanbase.env.example new file mode 100644 index 0000000000..42bed8df6a --- /dev/null +++ 
b/docker/envs/vectorstores/oceanbase.env.example @@ -0,0 +1,6 @@ +# ------------------------------ +# Oceanbase Configuration +# ------------------------------ + +OCEANBASE_CLUSTER_NAME=difyai +OCEANBASE_MEMORY_LIMIT=6G diff --git a/docker/envs/vectorstores/opengauss.env.example b/docker/envs/vectorstores/opengauss.env.example new file mode 100644 index 0000000000..9f58499b64 --- /dev/null +++ b/docker/envs/vectorstores/opengauss.env.example @@ -0,0 +1,12 @@ +# ------------------------------ +# Opengauss Configuration +# ------------------------------ + +OPENGAUSS_PASSWORD=Dify@123 +OPENGAUSS_DATABASE=dify +OPENGAUSS_MIN_CONNECTION=1 +OPENGAUSS_MAX_CONNECTION=5 +OPENGAUSS_ENABLE_PQ=false +OPENGAUSS_HOST=opengauss +OPENGAUSS_PORT=6600 +OPENGAUSS_USER=postgres diff --git a/docker/envs/vectorstores/opensearch.env.example b/docker/envs/vectorstores/opensearch.env.example new file mode 100644 index 0000000000..a6a9283378 --- /dev/null +++ b/docker/envs/vectorstores/opensearch.env.example @@ -0,0 +1,22 @@ +# ------------------------------ +# Opensearch Configuration +# ------------------------------ + +OPENSEARCH_PASSWORD=admin +OPENSEARCH_AWS_REGION=ap-southeast-1 +OPENSEARCH_AWS_SERVICE=aoss +OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 +OPENSEARCH_MEMLOCK_SOFT=-1 +OPENSEARCH_MEMLOCK_HARD=-1 +OPENSEARCH_NOFILE_SOFT=65536 +OPENSEARCH_NOFILE_HARD=65536 +OPENSEARCH_HOST=opensearch +OPENSEARCH_PORT=9200 +OPENSEARCH_SECURE=true +OPENSEARCH_VERIFY_CERTS=true +OPENSEARCH_AUTH_METHOD=basic +OPENSEARCH_USER=admin +OPENSEARCH_DISCOVERY_TYPE=single-node +OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true +OPENSEARCH_JAVA_OPTS_MIN=512m +OPENSEARCH_JAVA_OPTS_MAX=1024m diff --git a/docker/envs/vectorstores/oracle.env.example b/docker/envs/vectorstores/oracle.env.example new file mode 100644 index 0000000000..c8f24db41a --- /dev/null +++ b/docker/envs/vectorstores/oracle.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Oracle Configuration +# ------------------------------ + 
+ORACLE_PASSWORD=dify +ORACLE_DSN=oracle:1521/FREEPDB1 +ORACLE_CONFIG_DIR=/app/api/storage/wallet +ORACLE_WALLET_LOCATION=/app/api/storage/wallet +ORACLE_WALLET_PASSWORD=dify +ORACLE_IS_AUTONOMOUS=false +ORACLE_USER=dify +ORACLE_PWD=Dify123456 +ORACLE_CHARACTERSET=AL32UTF8 diff --git a/docker/envs/vectorstores/pgvecto-rs.env.example b/docker/envs/vectorstores/pgvecto-rs.env.example new file mode 100644 index 0000000000..6428e5dd67 --- /dev/null +++ b/docker/envs/vectorstores/pgvecto-rs.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Pgvecto Rs Configuration +# ------------------------------ + +PGVECTO_RS_HOST=pgvecto-rs +PGVECTO_RS_PORT=5432 +PGVECTO_RS_USER=postgres +PGVECTO_RS_PASSWORD=difyai123456 +PGVECTO_RS_DATABASE=dify diff --git a/docker/envs/vectorstores/pgvector.env.example b/docker/envs/vectorstores/pgvector.env.example new file mode 100644 index 0000000000..9fd1dbf962 --- /dev/null +++ b/docker/envs/vectorstores/pgvector.env.example @@ -0,0 +1,8 @@ +# ------------------------------ +# Pgvector Configuration +# ------------------------------ + +PGVECTOR_PGUSER=postgres +PGVECTOR_POSTGRES_PASSWORD=difyai123456 +PGVECTOR_POSTGRES_DB=dify +PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata diff --git a/docker/envs/vectorstores/qdrant.env.example b/docker/envs/vectorstores/qdrant.env.example new file mode 100644 index 0000000000..a3555fe547 --- /dev/null +++ b/docker/envs/vectorstores/qdrant.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Qdrant Configuration +# ------------------------------ + diff --git a/docker/envs/vectorstores/seekdb.env.example b/docker/envs/vectorstores/seekdb.env.example new file mode 100644 index 0000000000..4307fbede2 --- /dev/null +++ b/docker/envs/vectorstores/seekdb.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Seekdb Configuration +# ------------------------------ + diff --git a/docker/envs/vectorstores/vastbase.env.example b/docker/envs/vectorstores/vastbase.env.example new 
file mode 100644 index 0000000000..2c9db50fbe --- /dev/null +++ b/docker/envs/vectorstores/vastbase.env.example @@ -0,0 +1,11 @@ +# ------------------------------ +# Vastbase Configuration +# ------------------------------ + +VASTBASE_PASSWORD=Difyai123456 +VASTBASE_DATABASE=dify +VASTBASE_MIN_CONNECTION=1 +VASTBASE_MAX_CONNECTION=5 +VASTBASE_HOST=vastbase +VASTBASE_PORT=5432 +VASTBASE_USER=dify diff --git a/docker/envs/vectorstores/weaviate.env.example b/docker/envs/vectorstores/weaviate.env.example new file mode 100644 index 0000000000..82a3ccb172 --- /dev/null +++ b/docker/envs/vectorstores/weaviate.env.example @@ -0,0 +1,18 @@ +# ------------------------------ +# Weaviate Configuration +# ------------------------------ + +WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate +WEAVIATE_QUERY_DEFAULTS_LIMIT=25 +WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true +WEAVIATE_DEFAULT_VECTORIZER_MODULE=none +WEAVIATE_CLUSTER_HOSTNAME=node1 +WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true +WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai +WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true +WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai +WEAVIATE_DISABLE_TELEMETRY=false +WEAVIATE_ENABLE_TOKENIZER_GSE=false +WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA=false +WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR=false diff --git a/docker/generate_docker_compose b/docker/generate_docker_compose index 46d948f3c1..580091e006 100755 --- a/docker/generate_docker_compose +++ b/docker/generate_docker_compose @@ -64,25 +64,61 @@ def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"): return "\n".join(lines) -def insert_shared_env(template_path, output_path, shared_env_block, header_comments): +def create_env_files_from_example(env_example_path): """ - Inserts the shared environment variables block and header comments into the template file, - removing any existing x-shared-env anchors, and generates the 
final docker-compose.yaml file. - Always writes with LF line endings. + Creates actual env files from .env.example by copying the categorized .env.example files. + This allows docker-compose to use env_file references. + Supports per-module structure with subdirectories. + """ + base_dir = os.path.dirname(os.path.abspath(env_example_path)) + root_env_file = os.path.join(base_dir, ".env") + if not os.path.exists(root_env_file): + with open(env_example_path, "r", encoding="utf-8") as src, open( + root_env_file, "w", encoding="utf-8", newline="\n" + ) as dst: + dst.write(src.read()) + print(f"Created {root_env_file}") + else: + print(f"{root_env_file} already exists, skipping") + + envs_dir = os.path.join(base_dir, "envs") + if not os.path.isdir(envs_dir): + print(f"No envs directory found at {envs_dir}, skipping split env files") + return [] + + created_files = [] + # Walk through all .env.example files in subdirectories + for root, dirs, files in os.walk(envs_dir): + for file in files: + if file.endswith('.env.example'): + example_file = os.path.join(root, file) + env_file = example_file.replace('.env.example', '.env') + + if os.path.exists(env_file): + print(f"{env_file} already exists, skipping") + continue + + # Copy .example to actual file + with open(example_file, "r", encoding="utf-8") as src, open( + env_file, "w", encoding="utf-8", newline="\n" + ) as dst: + dst.write(src.read()) + created_files.append(env_file) + print(f"Created {env_file}") + + return created_files + + +def insert_shared_env(template_path, output_path, header_comments): + """ + Copies the template file to output path with header comments. + The template now uses env_file references instead of a huge YAML anchor. 
""" with open(template_path, "r", encoding="utf-8") as f: template_content = f.read() - # Remove existing x-shared-env: &shared-api-worker-env lines - template_content = re.sub( - r"^x-shared-env: &shared-api-worker-env\s*\n?", - "", - template_content, - flags=re.MULTILINE, - ) - - # Prepare the final content with header comments and shared env block - final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}" + # Prepare the final content with header comments + final_content = f"{header_comments}\n{template_content}" with open(output_path, "w", encoding="utf-8", newline="\n") as f: f.write(final_content) @@ -90,10 +126,10 @@ def insert_shared_env(template_path, output_path, shared_env_block, header_comme def main(): - env_example_path = ".env.example" - template_path = "docker-compose-template.yaml" - output_path = "docker-compose.yaml" - anchor_name = "shared-api-worker-env" # Can be modified as needed + base_dir = os.path.dirname(os.path.abspath(__file__)) + env_example_path = os.path.join(base_dir, ".env.example") + template_path = os.path.join(base_dir, "docker-compose-template.yaml") + output_path = os.path.join(base_dir, "docker-compose.yaml") # Define header comments to be added at the top of docker-compose.yaml header_comments = ( @@ -110,17 +146,14 @@ def main(): print(f"Error: File {path} does not exist.") sys.exit(1) - # Parse .env.example file - env_vars = parse_env_example(env_example_path) + # Create env files from categorized .env.example files + # These files are used by docker-compose's env_file directive + # This ensures .env files exist even in CI/CD environments + create_env_files_from_example(env_example_path) - if not env_vars: - print("Warning: No environment variables found in .env.example.") - - # Generate shared environment variables block - shared_env_block = generate_shared_env_block(env_vars, anchor_name) - - # Insert shared environment variables block and header comments into the template - 
insert_shared_env(template_path, output_path, shared_env_block, header_comments) + # Copy template to output with header comments + # The template now uses env_file references instead of a huge YAML anchor + insert_shared_env(template_path, output_path, header_comments) if __name__ == "__main__": diff --git a/e2e/scripts/common.ts b/e2e/scripts/common.ts index ea6c897b2d..2964892dd0 100644 --- a/e2e/scripts/common.ts +++ b/e2e/scripts/common.ts @@ -36,7 +36,7 @@ export const webDir = path.join(rootDir, 'web') export const middlewareComposeFile = path.join(dockerDir, 'docker-compose.middleware.yaml') export const middlewareEnvFile = path.join(dockerDir, 'middleware.env') -export const middlewareEnvExampleFile = path.join(dockerDir, 'middleware.env.example') +export const middlewareEnvExampleFile = path.join(dockerDir, 'envs', 'middleware.env.example') export const webEnvLocalFile = path.join(webDir, '.env.local') export const webEnvExampleFile = path.join(webDir, '.env.example') export const apiEnvExampleFile = path.join(apiDir, 'tests', 'integration_tests', '.env.example') diff --git a/eslint-suppressions.json b/eslint-suppressions.json index b5e67df509..2326e92d2f 100644 --- a/eslint-suppressions.json +++ b/eslint-suppressions.json @@ -202,6 +202,11 @@ "count": 1 } }, + "web/app/components/app/annotation/add-annotation-modal/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/app/annotation/batch-add-annotation-modal/index.tsx": { "erasable-syntax-only/enums": { "count": 1 @@ -230,6 +235,11 @@ "count": 1 } }, + "web/app/components/app/annotation/edit-annotation-modal/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/app/annotation/header-opts/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -252,6 +262,9 @@ "erasable-syntax-only/enums": { "count": 1 }, + "no-restricted-imports": { + "count": 1 + }, "react/set-state-in-effect": { "count": 5 }, @@ -259,21 +272,11 @@ "count": 1 } }, - 
"web/app/components/app/app-access-control/specific-groups-or-members.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/app-publisher/features-wrapper.tsx": { "ts/no-explicit-any": { "count": 4 } }, - "web/app/components/app/app-publisher/index.tsx": { - "ts/no-explicit-any": { - "count": 5 - } - }, "web/app/components/app/app-publisher/version-info-modal.tsx": { "no-restricted-imports": { "count": 1 @@ -315,11 +318,6 @@ "count": 4 } }, - "web/app/components/app/configuration/config-var/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config-var/select-var-type.tsx": { "ts/no-explicit-any": { "count": 1 @@ -333,17 +331,15 @@ "count": 1 } }, - "web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config/agent/agent-tools/index.tsx": { "ts/no-explicit-any": { "count": 9 } }, "web/app/components/app/configuration/config/agent/agent-tools/setting-built-in-tool.tsx": { + "no-restricted-imports": { + "count": 1 + }, "react-hooks/exhaustive-deps": { "count": 1 }, @@ -401,6 +397,16 @@ "count": 2 } }, + "web/app/components/app/configuration/configuration-view.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, + "web/app/components/app/configuration/dataset-config/card-item/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/app/configuration/dataset-config/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -531,6 +537,9 @@ } }, "web/app/components/app/log/list.tsx": { + "no-restricted-imports": { + "count": 1 + }, "react/set-state-in-effect": { "count": 6 }, @@ -569,17 +578,15 @@ "count": 2 } }, - "web/app/components/app/workflow-log/detail.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/workflow-log/filter.tsx": { "react-refresh/only-export-components": { "count": 1 } }, 
"web/app/components/app/workflow-log/list.tsx": { + "no-restricted-imports": { + "count": 1 + }, "react/set-state-in-effect": { "count": 2 } @@ -904,6 +911,11 @@ "count": 1 } }, + "web/app/components/base/drawer-plus/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/base/emoji-picker/index.tsx": { "no-restricted-imports": { "count": 1 @@ -935,11 +947,6 @@ "count": 1 } }, - "web/app/components/base/features/new-feature-panel/annotation-reply/config-param.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/features/new-feature-panel/annotation-reply/index.tsx": { "ts/no-explicit-any": { "count": 3 @@ -973,11 +980,6 @@ "count": 2 } }, - "web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/features/types.ts": { "erasable-syntax-only/enums": { "count": 2 @@ -1029,6 +1031,11 @@ "count": 3 } }, + "web/app/components/base/float-right-container/index.tsx": { + "no-restricted-imports": { + "count": 2 + } + }, "web/app/components/base/form/components/base/base-form.tsx": { "ts/no-explicit-any": { "count": 6 @@ -1233,7 +1240,7 @@ }, "web/app/components/base/icons/src/vender/line/development/index.ts": { "no-barrel-files/no-barrel-files": { - "count": 2 + "count": 1 } }, "web/app/components/base/icons/src/vender/line/editor/index.ts": { @@ -2059,11 +2066,6 @@ "count": 4 } }, - "web/app/components/datasets/documents/components/operations.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/components/rename-modal.tsx": { "no-restricted-imports": { "count": 1 @@ -2079,11 +2081,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/list/item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, 
"web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/index.tsx": { "react/set-state-in-effect": { "count": 5 @@ -2129,11 +2126,6 @@ "count": 2 } }, - "web/app/components/datasets/documents/create-from-pipeline/processing/embedding-process/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/create-from-pipeline/steps/index.ts": { "no-barrel-files/no-barrel-files": { "count": 3 @@ -2144,14 +2136,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/batch-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/completed/common/chunk-content.tsx": { "react/set-state-in-effect": { "count": 1 @@ -2162,21 +2146,11 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/completed/components/index.ts": { - "no-barrel-files/no-barrel-files": { - "count": 3 - } - }, "web/app/components/datasets/documents/detail/completed/components/segment-list-content.tsx": { "ts/no-non-null-asserted-optional-chain": { "count": 1 } }, - "web/app/components/datasets/documents/detail/completed/display-toggle.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/completed/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 5 @@ -2193,11 +2167,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/completed/segment-card/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/context.ts": { "ts/no-explicit-any": { "count": 1 @@ -2231,14 +2200,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/segment-add/index.tsx": { - "erasable-syntax-only/enums": { - "count": 1 - }, - "react-refresh/only-export-components": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/settings/pipeline-settings/index.tsx": { 
"ts/no-explicit-any": { "count": 6 @@ -2280,6 +2241,9 @@ } }, "web/app/components/datasets/hit-testing/index.tsx": { + "no-restricted-imports": { + "count": 1 + }, "react/unsupported-syntax": { "count": 1 } @@ -2291,7 +2255,7 @@ }, "web/app/components/datasets/metadata/edit-metadata-batch/modal.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 } }, "web/app/components/datasets/metadata/hooks/use-edit-dataset-metadata.ts": { @@ -2552,11 +2516,6 @@ "count": 4 } }, - "web/app/components/header/account-setting/model-provider-page/model-auth/config-provider.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/header/account-setting/model-provider-page/model-auth/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 6 @@ -2583,9 +2542,6 @@ } }, "web/app/components/header/account-setting/model-provider-page/model-auth/switch-credential-in-load-balancing.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 3 } @@ -2611,9 +2567,6 @@ } }, "web/app/components/header/account-setting/model-provider-page/model-parameter-modal/status-indicators.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -2628,11 +2581,6 @@ "count": 2 } }, - "web/app/components/header/account-setting/model-provider-page/provider-added-card/model-list-item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/header/account-setting/model-provider-page/provider-added-card/model-load-balancing-configs.tsx": { "ts/no-explicit-any": { "count": 5 @@ -2813,10 +2761,18 @@ } }, "web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 7 } }, + "web/app/components/plugins/plugin-detail-panel/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/plugins/plugin-detail-panel/model-list.tsx": { "ts/no-explicit-any": { "count": 1 @@ -2838,6 +2794,9 
@@ } }, "web/app/components/plugins/plugin-detail-panel/strategy-detail.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 2 } @@ -2870,11 +2829,6 @@ "count": 2 } }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/subscription-card.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/subscription-list/types.ts": { "erasable-syntax-only/enums": { "count": 1 @@ -2885,25 +2839,20 @@ "count": 7 } }, - "web/app/components/plugins/plugin-detail-panel/tool-selector/components/tool-item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/tool-selector/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 2 } }, "web/app/components/plugins/plugin-detail-panel/trigger/event-detail-drawer.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 5 } }, "web/app/components/plugins/plugin-item/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -2933,16 +2882,6 @@ "count": 1 } }, - "web/app/components/plugins/readme-panel/index.tsx": { - "react/unsupported-syntax": { - "count": 1 - } - }, - "web/app/components/plugins/readme-panel/store.ts": { - "erasable-syntax-only/enums": { - "count": 1 - } - }, "web/app/components/plugins/reference-setting-modal/auto-update-setting/types.ts": { "erasable-syntax-only/enums": { "count": 2 @@ -3005,11 +2944,6 @@ "count": 1 } }, - "web/app/components/rag-pipeline/components/panel/input-field/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/rag-pipeline/components/panel/test-run/preparation/document-processing/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -3179,6 +3113,9 @@ } }, "web/app/components/tools/edit-custom-collection-modal/index.tsx": { + "no-restricted-imports": { + "count": 1 + }, "react/set-state-in-effect": { "count": 4 }, @@ -3187,6 
+3124,9 @@ } }, "web/app/components/tools/edit-custom-collection-modal/test-api.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 1 } @@ -3196,6 +3136,11 @@ "count": 1 } }, + "web/app/components/tools/mcp/detail/provider-detail.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/tools/mcp/mcp-server-modal.tsx": { "no-restricted-imports": { "count": 1 @@ -3224,12 +3169,20 @@ "count": 1 } }, + "web/app/components/tools/provider/detail.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/tools/provider/empty.tsx": { "ts/no-explicit-any": { "count": 1 } }, "web/app/components/tools/setting/build-in/config-credentials.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 3 } @@ -3338,11 +3291,6 @@ "count": 1 } }, - "web/app/components/workflow/block-selector/tabs.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/block-selector/tool/tool-list-flat-view/list.tsx": { "ts/no-explicit-any": { "count": 1 @@ -3609,11 +3557,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/mcp-tool-not-support-tooltip.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/memory-config.tsx": { "unicorn/prefer-number-properties": { "count": 1 @@ -3649,11 +3592,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/variable/match-schema-type.ts": { "ts/no-explicit-any": { "count": 8 @@ -4008,11 +3946,6 @@ "count": 5 } }, - "web/app/components/workflow/nodes/iteration-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/iteration/default.ts": { "ts/no-explicit-any": { "count": 1 @@ -4033,11 +3966,6 @@ "count": 4 } }, - 
"web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/search-method-option.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/type.ts": { "ts/no-explicit-any": { "count": 2 @@ -4061,13 +3989,13 @@ "count": 1 } }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/condition-item.tsx": { - "ts/no-explicit-any": { + "web/app/components/workflow/nodes/knowledge-retrieval/components/dataset-item.tsx": { + "no-restricted-imports": { "count": 1 } }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/index.tsx": { - "no-restricted-imports": { + "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/condition-item.tsx": { + "ts/no-explicit-any": { "count": 1 } }, @@ -4193,11 +4121,6 @@ "count": 7 } }, - "web/app/components/workflow/nodes/loop-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/loop/components/condition-list/condition-input.tsx": { "ts/no-explicit-any": { "count": 1 @@ -4259,11 +4182,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/parameter-extractor/panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/parameter-extractor/types.ts": { "erasable-syntax-only/enums": { "count": 2 @@ -4282,11 +4200,6 @@ "count": 9 } }, - "web/app/components/workflow/nodes/question-classifier/components/advanced-setting.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/question-classifier/components/class-item.tsx": { "react/set-state-in-effect": { "count": 1 @@ -4305,11 +4218,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/question-classifier/node.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/question-classifier/use-config.ts": { 
"react/set-state-in-effect": { "count": 2 @@ -4417,9 +4325,6 @@ } }, "web/app/components/workflow/nodes/trigger-plugin/components/trigger-form/item.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -4475,11 +4380,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/trigger-webhook/panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/utils.ts": { "ts/no-explicit-any": { "count": 1 @@ -4590,9 +4490,6 @@ } }, "web/app/components/workflow/panel/env-panel/variable-modal.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 4 }, @@ -4823,9 +4720,6 @@ } }, "web/app/components/workflow/variable-inspect/listening.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -4864,26 +4758,11 @@ "count": 5 } }, - "web/app/components/workflow/workflow-preview/components/nodes/base.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/workflow-preview/components/nodes/constants.ts": { "ts/no-explicit-any": { "count": 1 } }, - "web/app/components/workflow/workflow-preview/components/nodes/iteration-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/workflow-preview/components/nodes/loop-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/workflow-preview/components/zoom-in-out.tsx": { "erasable-syntax-only/enums": { "count": 1 diff --git a/packages/dify-ui/README.md b/packages/dify-ui/README.md index bdeeec33cb..c78faede89 100644 --- a/packages/dify-ui/README.md +++ b/packages/dify-ui/README.md @@ -28,6 +28,7 @@ Always import from a **subpath export** — there is no barrel: import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' import { Dialog, DialogContent, DialogTrigger } from '@langgenius/dify-ui/dialog' +import { Drawer, DrawerPopup, 
DrawerTrigger } from '@langgenius/dify-ui/drawer' import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import '@langgenius/dify-ui/styles.css' // once, in the app root ``` @@ -36,12 +37,12 @@ Importing from `@langgenius/dify-ui` (no subpath) is intentionally not supported ## Primitives -| Category | Subpath | Notes | -| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | -| Overlay | `./alert-dialog`, `./autocomplete`, `./combobox`, `./context-menu`, `./dialog`, `./dropdown-menu`, `./popover`, `./select`, `./toast`, `./tooltip` | Portalled. See [Overlay & portal contract] below. | -| Form | `./autocomplete`, `./combobox`, `./number-field`, `./slider`, `./switch` | Controlled / uncontrolled per Base UI defaults. | -| Layout | `./scroll-area` | Custom-styled scrollbar over the host viewport. | -| Media | `./avatar`, `./button` | Button exposes `cva` variants. | +| Category | Subpath | Notes | +| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| Overlay | `./alert-dialog`, `./autocomplete`, `./combobox`, `./context-menu`, `./dialog`, `./drawer`, `./dropdown-menu`, `./popover`, `./select`, `./toast`, `./tooltip` | Portalled. See [Overlay & portal contract] below. | +| Form | `./autocomplete`, `./combobox`, `./number-field`, `./slider`, `./switch` | Controlled / uncontrolled per Base UI defaults. | +| Layout | `./scroll-area` | Custom-styled scrollbar over the host viewport. | +| Media | `./avatar`, `./button` | Button exposes `cva` variants. 
| Utilities: @@ -65,7 +66,7 @@ If a consumer uses Dify UI source files through the workspace, add an explicit s ## Overlay & portal contract -All overlay primitives (`dialog`, `alert-dialog`, `autocomplete`, `combobox`, `popover`, `dropdown-menu`, `context-menu`, `select`, `tooltip`, `toast`) render their content inside a [Base UI Portal] attached to `document.body`. This is the Base UI default — see the upstream [Portals][Base UI Portal] docs for the underlying behavior. Consumers **do not** need to wrap anything in a portal manually. +Overlay primitives render their floating surfaces inside a [Base UI Portal] attached to `document.body`. This is the Base UI default — see the upstream [Portals][Base UI Portal] docs for the underlying behavior. Convenience content components such as `DialogContent`, `PopoverContent`, and `SelectContent` own their portal internally; primitives with explicit portal anatomy such as `Drawer` expose the matching `DrawerPortal` part so consumers can compose the full Base UI structure. ### Root isolation requirement @@ -83,21 +84,28 @@ Equivalent: any root element with `isolation: isolate` in CSS. Without it, overl Every overlay primitive uses a single, shared z-index. Do **not** override it at call sites. -| Layer | z-index | Where | -| ----------------------------------------------------------------------------------------------------------- | -------- | -------------------------------------------------------------------------- | -| Overlays (Dialog, AlertDialog, Autocomplete, Combobox, Popover, DropdownMenu, ContextMenu, Select, Tooltip) | `z-1002` | Positioner / Backdrop | -| Toast viewport | `z-1003` | One layer above overlays so notifications are never hidden under a dialog. 
| +| Layer | z-index | Where | +| ------------------------------------------------------------------------------------------------------------------- | -------- | -------------------------------------------------------------------------- | +| Overlays (Dialog, AlertDialog, Autocomplete, Combobox, Drawer, Popover, DropdownMenu, ContextMenu, Select, Tooltip) | `z-1002` | Positioner / Backdrop | +| Toast viewport | `z-1003` | One layer above overlays so notifications are never hidden under a dialog. | -Rationale: during Dify's migration from legacy `base/modal` / `base/dialog` overlays to this package, new and old overlays coexist in the DOM. `z-1002` sits above any common legacy layer, eliminating per-call-site z-index hacks. Among themselves, new primitives share the same z-index and **rely on DOM order** for stacking — the portal mounted later wins. +Rationale: during Dify's migration from legacy `base/modal` / `base/dialog` / `base/drawer` / `base/drawer-plus` overlays to this package, new and old overlays coexist in the DOM. `z-1002` sits above any common legacy layer, eliminating per-call-site z-index hacks. Among themselves, new primitives share the same z-index and **rely on DOM order** for stacking — the portal mounted later wins. See `[web/docs/overlay-migration.md](../../web/docs/overlay-migration.md)` for the Dify-web migration history. Once the legacy overlays are gone, the values in this table can drop back to `z-50` / `z-51`. ### Rules - Never add `z-1003` / `z-9999` / etc. overrides on primitives from this package. If something is getting clipped, the **parent** overlay (typically a legacy one) is the problem and should be migrated. -- Never portal an overlay manually on top of our primitives — use `DialogTrigger`, `PopoverTrigger`, etc. Base UI handles focus management, scroll-locking, and dismissal. 
+- Never create an extra manual portal on top of our primitives — use the exported content / portal parts such as `DialogContent`, `PopoverContent`, and `DrawerPortal`. Base UI handles focus management, scroll-locking, and dismissal. - When a primitive needs additional presentation chrome (e.g. a custom backdrop), add it **inside** the exported component, not at call sites. +### Tooltip, infotip, and popover semantics + +- Use `Tooltip` only for short, non-interactive visual labels. The trigger must already have visible text or an `aria-label`; the tooltip is not the accessible name and must not contain links, buttons, forms, or structured prose. +- Use `Popover` for explanatory content, long text, rich layout, or anything users may need to reach on touch or with assistive technology. In `web/`, the `Infotip` wrapper is the preferred pattern for a `?` help glyph backed by `Popover`. +- Pick a `placement` and let the primitive own spacing. Avoid per-call-site offsets unless the component API explicitly needs a measured layout exception. +- When passing a Base UI trigger `render` prop, render a real ` diff --git a/web/app/(shareLayout)/components/authenticated-layout.tsx b/web/app/(shareLayout)/components/authenticated-layout.tsx index a7b65f33fe..3ee5d52603 100644 --- a/web/app/(shareLayout)/components/authenticated-layout.tsx +++ b/web/app/(shareLayout)/components/authenticated-layout.tsx @@ -39,7 +39,9 @@ const AuthenticatedLayout = ({ children }: { children: React.ReactNode }) => { const getSigninUrl = useCallback(() => { const params = new URLSearchParams(searchParams) params.delete('message') - params.set('redirect_url', pathname) + const query = params.toString() + const fullPath = query ? 
`${pathname}?${query}` : pathname + params.set('redirect_url', fullPath) return `/webapp-signin?${params.toString()}` }, [searchParams, pathname]) diff --git a/web/app/account/(commonLayout)/avatar.tsx b/web/app/account/(commonLayout)/avatar.tsx index ccae182c9a..3fefb8a319 100644 --- a/web/app/account/(commonLayout)/avatar.tsx +++ b/web/app/account/(commonLayout)/avatar.tsx @@ -1,11 +1,13 @@ 'use client' -import { Menu, MenuButton, MenuItem, MenuItems, Transition } from '@headlessui/react' import { Avatar } from '@langgenius/dify-ui/avatar' +import { cn } from '@langgenius/dify-ui/cn' import { - RiGraduationCapFill, -} from '@remixicon/react' + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from '@langgenius/dify-ui/dropdown-menu' import { useSuspenseQuery } from '@tanstack/react-query' -import { Fragment } from 'react' import { useTranslation } from 'react-i18next' import { resetUser } from '@/app/components/base/amplitude/utils' import { LogOut01 } from '@/app/components/base/icons/src/vender/line/general' @@ -38,73 +40,48 @@ export default function AppSelector() { } return ( - - { - ({ open }) => ( - <> -
- - - + + + + + +
+
+
+
+ {userProfile.name} + {isEducationAccount && ( + + + EDU + + )} +
+
{userProfile.email}
- - - -
-
-
-
- {userProfile.name} - {isEducationAccount && ( - - - EDU - - )} -
-
{userProfile.email}
-
- -
-
-
- -
handleLogout()}> -
- -
{t('userProfile.logout', { ns: 'common' })}
-
-
-
-
-
- - ) - } -
+ + + +
+ + + {t('userProfile.logout', { ns: 'common' })} + +
+ + ) } diff --git a/web/app/components/app-sidebar/app-info/app-info-detail-panel.tsx b/web/app/components/app-sidebar/app-info/app-info-detail-panel.tsx index 9afa0063dc..3dabb2a91e 100644 --- a/web/app/components/app-sidebar/app-info/app-info-detail-panel.tsx +++ b/web/app/components/app-sidebar/app-info/app-info-detail-panel.tsx @@ -97,7 +97,7 @@ const AppInfoDetailPanel = ({
diff --git a/web/app/components/app/annotation/header-opts/__tests__/index.spec.tsx b/web/app/components/app/annotation/header-opts/__tests__/index.spec.tsx index 944a8563eb..5e7b2dc1d0 100644 --- a/web/app/components/app/annotation/header-opts/__tests__/index.spec.tsx +++ b/web/app/components/app/annotation/header-opts/__tests__/index.spec.tsx @@ -3,7 +3,7 @@ import type { ComponentProps } from 'react' import type { Mock } from 'vitest' import type { AnnotationItemBasic } from '../../type' import type { Locale } from '@/i18n-config' -import { render, screen, waitFor } from '@testing-library/react' +import { act, render, screen, waitFor } from '@testing-library/react' import userEvent from '@testing-library/user-event' import * as React from 'react' import { useLocale } from '@/context/i18n' @@ -128,21 +128,15 @@ vi.mock('@headlessui/react', () => { } }) -let lastCSVDownloaderProps: Record | undefined -const mockCSVDownloader = vi.fn(({ children, ...props }) => { - lastCSVDownloaderProps = props - return ( -
- {children} -
- ) -}) +const mockJsonToCSV = vi.fn((_: unknown) => 'csv-content') +const mockCSVDownloader = vi.fn(({ children }) => <>{children}) vi.mock('react-papaparse', () => ({ useCSVDownloader: () => ({ CSVDownloader: (props: any) => mockCSVDownloader(props), Type: { Link: 'link' }, }), + jsonToCSV: (data: unknown) => mockJsonToCSV(data), })) vi.mock('@/service/annotation', () => ({ @@ -194,33 +188,28 @@ const openOperationsPopover = async (user: ReturnType) = const expandExportMenu = async (user: ReturnType) => { await openOperationsPopover(user) - const exportLabel = await screen.findByText('appAnnotation.table.header.bulkExport') - const exportButton = exportLabel.closest('button') as HTMLButtonElement - expect(exportButton).toBeTruthy() - await user.click(exportButton) + const exportItem = await screen.findByRole('menuitem', { name: /appAnnotation\.table\.header\.bulkExport/i }) + await user.hover(exportItem) } -const getExportButtons = async () => { - const csvLabel = await screen.findByText('CSV') - const jsonLabel = await screen.findByText('JSONL') - const csvButton = csvLabel.closest('button') as HTMLButtonElement - const jsonButton = jsonLabel.closest('button') as HTMLButtonElement - expect(csvButton).toBeTruthy() - expect(jsonButton).toBeTruthy() +const getExportItems = async () => { + const csvItem = await screen.findByRole('menuitem', { name: 'CSV' }) + const jsonItem = await screen.findByRole('menuitem', { name: 'JSONL' }) return { - csvButton, - jsonButton, + csvItem, + jsonItem, } } -const clickOperationAction = async ( - user: ReturnType, - translationKey: string, -) => { - const label = await screen.findByText(translationKey) - const button = label.closest('button') as HTMLButtonElement - expect(button).toBeTruthy() - await user.click(button) +const clickMenuItem = async (item: HTMLElement) => { + await act(async () => { + item.click() + }) +} + +const clickOperationAction = async (translationKey: string) => { + const item = await 
screen.findByRole('menuitem', { name: translationKey }) + await clickMenuItem(item) } const mockAnnotations: AnnotationItemBasic[] = [ @@ -237,11 +226,14 @@ describe('HeaderOptions', () => { beforeEach(() => { vi.clearAllMocks() vi.useRealTimers() - mockCSVDownloader.mockClear() - lastCSVDownloaderProps = undefined + mockJsonToCSV.mockReturnValue('csv-content') mockedFetchAnnotations.mockResolvedValue({ data: [] }) }) + afterEach(() => { + vi.restoreAllMocks() + }) + it('should fetch annotations on mount and render enabled export actions when data exist', async () => { mockedFetchAnnotations.mockResolvedValue({ data: mockAnnotations }) const user = userEvent.setup() @@ -253,22 +245,69 @@ describe('HeaderOptions', () => { await expandExportMenu(user) - const { csvButton, jsonButton } = await getExportButtons() + const { csvItem, jsonItem } = await getExportItems() - expect(csvButton).not.toBeDisabled() - expect(jsonButton).not.toBeDisabled() + expect(csvItem).not.toHaveAttribute('data-disabled') + expect(jsonItem).not.toHaveAttribute('data-disabled') - await waitFor(() => { - expect(lastCSVDownloaderProps).toMatchObject({ - bom: true, - filename: 'annotations-en-US', - type: 'link', - data: [ - ['Question', 'Answer'], - ['Question 1', 'Answer 1'], - ], + await clickMenuItem(csvItem) + + expect(mockJsonToCSV).toHaveBeenCalledWith([ + ['Question', 'Answer'], + ['Question 1', 'Answer 1'], + ]) + }) + + it('should trigger CSV download with locale-specific filename', async () => { + mockedFetchAnnotations.mockResolvedValue({ data: mockAnnotations }) + const user = userEvent.setup() + const originalCreateElement = document.createElement.bind(document) + const anchor = originalCreateElement('a') as HTMLAnchorElement + const clickSpy = vi.spyOn(anchor, 'click').mockImplementation(vi.fn()) + const createElementSpy = vi.spyOn(document, 'createElement') + .mockImplementation((tagName: Parameters[0]) => { + if (tagName === 'a') + return anchor + return 
originalCreateElement(tagName) }) + let capturedBlob: Blob | null = null + const objectURLSpy = vi.spyOn(URL, 'createObjectURL') + .mockImplementation((blob) => { + capturedBlob = blob as Blob + return 'blob://mock-url' + }) + const revokeSpy = vi.spyOn(URL, 'revokeObjectURL').mockImplementation(vi.fn()) + + renderComponent({}, LanguagesSupported[1]) + + await expandExportMenu(user) + + const { csvItem } = await getExportItems() + await clickMenuItem(csvItem) + + expect(mockJsonToCSV).toHaveBeenCalledWith([ + ['问题', '答案'], + ['Question 1', 'Answer 1'], + ]) + expect(createElementSpy).toHaveBeenCalled() + expect(anchor.download).toBe(`annotations-${LanguagesSupported[1]}.csv`) + expect(clickSpy).toHaveBeenCalled() + expect(revokeSpy).toHaveBeenCalledWith('blob://mock-url') + + expect(capturedBlob).toBeInstanceOf(Blob) + expect(capturedBlob!.type).toBe('text/csv;charset=utf-8;') + + const blobContent = await new Promise((resolve) => { + const reader = new FileReader() + reader.onload = () => resolve(reader.result as string) + reader.readAsText(capturedBlob!) 
}) + expect(blobContent).toBe('csv-content') + + clickSpy.mockRestore() + createElementSpy.mockRestore() + objectURLSpy.mockRestore() + revokeSpy.mockRestore() }) it('should disable export actions when there are no annotations', async () => { @@ -277,14 +316,11 @@ describe('HeaderOptions', () => { await expandExportMenu(user) - const { csvButton, jsonButton } = await getExportButtons() + const { csvItem, jsonItem } = await getExportItems() - expect(csvButton)!.toBeDisabled() - expect(jsonButton)!.toBeDisabled() - - expect(lastCSVDownloaderProps).toMatchObject({ - data: [['Question', 'Answer']], - }) + expect(csvItem).toHaveAttribute('data-disabled') + expect(jsonItem).toHaveAttribute('data-disabled') + expect(mockJsonToCSV).not.toHaveBeenCalled() }) it('should open the add annotation modal and forward the onAdd callback', async () => { @@ -321,7 +357,7 @@ describe('HeaderOptions', () => { renderComponent({ onAdded }) await openOperationsPopover(user) - await clickOperationAction(user, 'appAnnotation.table.header.bulkImport') + await clickOperationAction('appAnnotation.table.header.bulkImport') expect(await screen.findByText('appAnnotation.batchModal.title'))!.toBeInTheDocument() await user.click( @@ -354,10 +390,8 @@ describe('HeaderOptions', () => { await expandExportMenu(user) - await waitFor(() => expect(mockCSVDownloader).toHaveBeenCalled()) - - const { jsonButton } = await getExportButtons() - await user.click(jsonButton) + const { jsonItem } = await getExportItems() + await clickMenuItem(jsonItem) expect(createElementSpy).toHaveBeenCalled() expect(anchor.download).toBe(`annotations-${LanguagesSupported[1]}.jsonl`) @@ -396,7 +430,7 @@ describe('HeaderOptions', () => { renderComponent({ onAdded }) await openOperationsPopover(user) - await clickOperationAction(user, 'appAnnotation.table.header.clearAll') + await clickOperationAction('appAnnotation.table.header.clearAll') await screen.findByText('appAnnotation.table.header.clearAllConfirm') const confirmButton = 
screen.getByRole('button', { name: 'common.operation.confirm' }) @@ -416,7 +450,7 @@ describe('HeaderOptions', () => { renderComponent({ onAdded }) await openOperationsPopover(user) - await clickOperationAction(user, 'appAnnotation.table.header.clearAll') + await clickOperationAction('appAnnotation.table.header.clearAll') await screen.findByText('appAnnotation.table.header.clearAllConfirm') const confirmButton = screen.getByRole('button', { name: 'common.operation.confirm' }) await user.click(confirmButton) diff --git a/web/app/components/app/annotation/header-opts/index.tsx b/web/app/components/app/annotation/header-opts/index.tsx index fc27524c71..6814c3692c 100644 --- a/web/app/components/app/annotation/header-opts/index.tsx +++ b/web/app/components/app/annotation/header-opts/index.tsx @@ -1,19 +1,21 @@ 'use client' import type { FC } from 'react' import type { AnnotationItemBasic } from '../type' -import { Menu, MenuButton, MenuItems, Transition } from '@headlessui/react' import { Button } from '@langgenius/dify-ui/button' -import { cn } from '@langgenius/dify-ui/cn' import { DropdownMenu, DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSub, + DropdownMenuSubContent, + DropdownMenuSubTrigger, DropdownMenuTrigger, } from '@langgenius/dify-ui/dropdown-menu' import * as React from 'react' -import { Fragment, useEffect, useState } from 'react' +import { useEffect, useState } from 'react' import { useTranslation } from 'react-i18next' import { - useCSVDownloader, + jsonToCSV, } from 'react-papaparse' import { useLocale } from '@/context/i18n' @@ -54,6 +56,15 @@ const downloadAnnotationJsonl = (list: AnnotationItemBasic[], locale: string) => downloadBlob({ data: file, fileName: `annotations-${locale}.jsonl` }) } +const downloadAnnotationCsv = (list: AnnotationItemBasic[], locale: string) => { + const content = jsonToCSV([ + locale !== LanguagesSupported[1] ? 
CSV_HEADER_QA_EN : CSV_HEADER_QA_CN, + ...list.map(item => [item.question, item.answer]), + ]) + const file = new Blob([`\uFEFF${content}`], { type: 'text/csv;charset=utf-8;' }) + downloadBlob({ data: file, fileName: `annotations-${locale}.csv` }) +} + const OperationsMenu: FC = ({ list, onClose, @@ -63,88 +74,62 @@ const OperationsMenu: FC = ({ }) => { const { t } = useTranslation() const locale = useLocale() - const { CSVDownloader, Type } = useCSVDownloader() const annotationUnavailable = list.length === 0 return ( -
- - - - - {t('table.header.bulkExport', { ns: 'appAnnotation' })} - - - + {t('table.header.bulkImport', { ns: 'appAnnotation' })} + + + + + {t('table.header.bulkExport', { ns: 'appAnnotation' })} + + - { + onClose() + downloadAnnotationCsv(list, locale) + }} > - [item.question, item.answer]), - ]} - > - - - - - - - -
+ + {t('table.header.clearAll', { ns: 'appAnnotation' })} + + ) } @@ -204,7 +189,7 @@ const HeaderOptions: FC = ({
{t('table.header.addAnnotation', { ns: 'appAnnotation' })}
- + = ({ { , ) - const closeButton = document.body.querySelector('div.absolute.right-5.top-5') as HTMLElement - fireEvent.click(closeButton) + fireEvent.click(screen.getByRole('button', { name: 'Close' })) await waitFor(() => { expect(onClose).toHaveBeenCalledTimes(1) diff --git a/web/app/components/app/app-access-control/__tests__/access-control.spec.tsx b/web/app/components/app/app-access-control/__tests__/access-control.spec.tsx index 21dd8c5fc2..4aaea1670f 100644 --- a/web/app/components/app/app-access-control/__tests__/access-control.spec.tsx +++ b/web/app/components/app/app-access-control/__tests__/access-control.spec.tsx @@ -176,7 +176,7 @@ describe('AccessControlItem', () => { }) }) -// AccessControlDialog renders a headless UI dialog with a manual close control +// AccessControlDialog renders the shared dialog primitive with a close control. describe('AccessControlDialog', () => { it('should render dialog content when visible', () => { render( @@ -191,13 +191,13 @@ describe('AccessControlDialog', () => { it('should trigger onClose when clicking the close control', async () => { const handleClose = vi.fn() - const { container } = render( + render(
Dialog Content
, ) - const closeButton = container.querySelector('.absolute.right-5.top-5') as HTMLElement + const closeButton = screen.getByRole('button', { name: 'Close' }) fireEvent.click(closeButton) await waitFor(() => { diff --git a/web/app/components/app/app-access-control/access-control-dialog.tsx b/web/app/components/app/app-access-control/access-control-dialog.tsx index bbf5329c9d..611c6f1c92 100644 --- a/web/app/components/app/app-access-control/access-control-dialog.tsx +++ b/web/app/components/app/app-access-control/access-control-dialog.tsx @@ -1,8 +1,11 @@ import type { ReactNode } from 'react' -import { Dialog, Transition } from '@headlessui/react' import { cn } from '@langgenius/dify-ui/cn' -import { RiCloseLine } from '@remixicon/react' -import { Fragment, useCallback } from 'react' +import { + Dialog, + DialogCloseButton, + DialogContent, +} from '@langgenius/dify-ui/dialog' +import { useCallback } from 'react' type DialogProps = { className?: string @@ -21,40 +24,12 @@ const AccessControlDialog = ({ onClose?.() }, [onClose]) return ( - - null}> - -
- - -
- - -
close()} className="absolute top-5 right-5 z-10 flex h-8 w-8 cursor-pointer items-center justify-center"> - -
- {children} -
-
-
-
-
+ !open && close()}> + + + {children} + + ) } diff --git a/web/app/components/app/app-access-control/index.tsx b/web/app/components/app/app-access-control/index.tsx index cff670e10f..593664c918 100644 --- a/web/app/components/app/app-access-control/index.tsx +++ b/web/app/components/app/app-access-control/index.tsx @@ -1,8 +1,8 @@ 'use client' import type { Subject } from '@/models/access-control' import type { App } from '@/types/app' -import { Description as DialogDescription, DialogTitle } from '@headlessui/react' import { Button } from '@langgenius/dify-ui/button' +import { DialogDescription, DialogTitle } from '@langgenius/dify-ui/dialog' import { toast } from '@langgenius/dify-ui/toast' import { RiBuildingLine, RiGlobalLine, RiVerifiedBadgeLine } from '@remixicon/react' import { useSuspenseQuery } from '@tanstack/react-query' diff --git a/web/app/components/app/app-access-control/specific-groups-or-members.tsx b/web/app/components/app/app-access-control/specific-groups-or-members.tsx index 35e6b1cc19..2cacd2cf03 100644 --- a/web/app/components/app/app-access-control/specific-groups-or-members.tsx +++ b/web/app/components/app/app-access-control/specific-groups-or-members.tsx @@ -1,14 +1,14 @@ 'use client' import type { AccessControlAccount, AccessControlGroup } from '@/models/access-control' import { Avatar } from '@langgenius/dify-ui/avatar' -import { RiAlertFill, RiCloseCircleFill, RiLockLine, RiOrganizationChart } from '@remixicon/react' +import { RiCloseCircleFill, RiLockLine, RiOrganizationChart } from '@remixicon/react' import { useCallback, useEffect } from 'react' import { useTranslation } from 'react-i18next' import { AccessMode } from '@/models/access-control' import { useAppWhiteListSubjects } from '@/service/access-control' import useAccessControlStore from '../../../../context/access-control-store' +import { Infotip } from '../../base/infotip' import Loading from '../../base/loading' -import Tooltip from '../../base/tooltip' import 
AddMemberOrGroupDialog from './add-member-or-group-pop' export default function SpecificGroupsOrMembers() { @@ -137,9 +137,14 @@ function BaseItem({ icon, onRemove, children }: BaseItemProps) { export function WebAppSSONotEnabledTip() { const { t } = useTranslation() + const tip = t('accessControlDialog.webAppSSONotEnabledTip', { ns: 'app' }) + return ( - - - + + {tip} + ) } diff --git a/web/app/components/app/app-publisher/__tests__/index.spec.tsx b/web/app/components/app/app-publisher/__tests__/index.spec.tsx index cbfd679ace..0dfb4347e4 100644 --- a/web/app/components/app/app-publisher/__tests__/index.spec.tsx +++ b/web/app/components/app/app-publisher/__tests__/index.spec.tsx @@ -20,6 +20,7 @@ const mockOpenAsyncWindow = vi.fn() const mockFetchInstalledAppList = vi.fn() const mockFetchAppDetailDirect = vi.fn() const mockToastError = vi.fn() +const mockWindowOpen = vi.fn() const mockInvalidateAppWorkflow = vi.fn() const sectionProps = vi.hoisted(() => ({ @@ -37,6 +38,7 @@ vi.mock('react-i18next', () => ({ useTranslation: () => ({ t: (key: string) => key, }), + Trans: ({ i18nKey }: { i18nKey?: string }) => i18nKey ?? null, })) vi.mock('ahooks', async () => { @@ -91,6 +93,21 @@ vi.mock('@/service/use-workflow', () => ({ useInvalidateAppWorkflow: () => mockInvalidateAppWorkflow, })) +vi.mock('@/service/use-tools', () => ({ + useWorkflowToolDetailByAppID: () => ({ + data: undefined, + isLoading: false, + }), + useInvalidateAllWorkflowTools: () => vi.fn(), + useInvalidateWorkflowToolDetailByAppID: () => vi.fn(), +})) + +vi.mock('@/context/app-context', () => ({ + useAppContext: () => ({ + isCurrentWorkspaceManager: true, + }), +})) + vi.mock('@langgenius/dify-ui/toast', () => ({ toast: { error: (...args: unknown[]) => mockToastError(...args), @@ -121,6 +138,15 @@ vi.mock('../../app-access-control', () => ({ ), })) +vi.mock('@/app/components/tools/workflow-tool', () => ({ + WorkflowToolDrawer: ({ onHide }: { onHide: () => void }) => ( +
+ workflow tool drawer + +
+ ), +})) + vi.mock('@langgenius/dify-ui/popover', () => import('@/__mocks__/base-ui-popover')) vi.mock('../sections', () => ({ @@ -143,6 +169,13 @@ vi.mock('../sections', () => ({
+ {props.handleOpenRunConfig && ( + <> + + + + )} +
) }, @@ -175,6 +208,10 @@ describe('AppPublisher', () => { mockOpenAsyncWindow.mockImplementation(async (resolver: () => Promise) => { await resolver() }) + Object.defineProperty(window, 'open', { + writable: true, + value: mockWindowOpen, + }) }) it('should open the publish popover and refetch access permission data', async () => { @@ -231,6 +268,94 @@ describe('AppPublisher', () => { expect(screen.getByTestId('embedded-modal'))!.toBeInTheDocument() }) + it('should collect hidden inputs before opening published run links from config actions', async () => { + render( + , + ) + + fireEvent.click(screen.getByText('common.publish')) + fireEvent.click(screen.getByText('publisher-run-config')) + + expect(screen.getByText('overview.appInfo.workflowLaunchHiddenInputs.title')).toBeInTheDocument() + + fireEvent.change(screen.getByLabelText('Secret'), { + target: { value: 'top-secret' }, + }) + fireEvent.click(screen.getByRole('button', { name: 'overview.appInfo.launch' })) + + await waitFor(() => { + expect(mockWindowOpen).toHaveBeenCalledWith( + `https://example.com${basePath}/chat/token-1?secret=${encodeURIComponent('top-secret')}`, + '_blank', + ) + }) + }) + + it('should open batch run config links with the configured hidden inputs', async () => { + mockAppDetail = { + ...mockAppDetail, + mode: AppModeEnum.WORKFLOW, + } + + render( + , + ) + + fireEvent.click(screen.getByText('common.publish')) + fireEvent.click(screen.getByText('publisher-batch-run-config')) + + fireEvent.change(screen.getByLabelText('Batch Secret'), { + target: { value: 'batch-value' }, + }) + fireEvent.click(screen.getByRole('button', { name: 'overview.appInfo.launch' })) + + await waitFor(() => { + expect(mockWindowOpen).toHaveBeenCalledWith( + `https://example.com${basePath}/workflow/token-1?mode=batch&batch_secret=${encodeURIComponent('batch-value')}`, + '_blank', + ) + }) + }) + + it('should keep workflow tool drawer mounted after closing the publish popover', () => { + mockAppDetail = { + 
...mockAppDetail, + mode: AppModeEnum.WORKFLOW, + } + + render( + , + ) + + fireEvent.click(screen.getByText('common.publish')) + fireEvent.click(screen.getByText('publisher-workflow-tool')) + + expect(screen.queryByTestId('popover-content')).not.toBeInTheDocument() + expect(screen.getByTestId('workflow-tool-drawer')).toBeInTheDocument() + }) + it('should close embedded and access control panels through child callbacks', async () => { render( ({ })) vi.mock('../suggested-action', () => ({ - default: ({ children, onClick, link, disabled }: { children: ReactNode, onClick?: () => void, link?: string, disabled?: boolean }) => ( - + default: ({ + children, + onClick, + link, + disabled, + actionButton, + }: { + children: ReactNode + onClick?: () => void + link?: string + disabled?: boolean + actionButton?: { ariaLabel: string, onClick: () => void } + }) => ( +
+ + {actionButton && ( + + )} +
), })) @@ -170,9 +194,25 @@ describe('app-publisher sections', () => { expect(render().container).toBeEmptyDOMElement() }) + it('should hide access control content when enabled is false', () => { + render( + , + ) + + expect(screen.queryByText('publishApp.title')).not.toBeInTheDocument() + expect(screen.queryByText('accessControlDialog.accessItems.anyone')).not.toBeInTheDocument() + }) + it('should render workflow actions, batch run links, and workflow tool configuration', () => { const handleOpenInExplore = vi.fn() const handleEmbed = vi.fn() + const handleOpenRunConfig = vi.fn() const { rerender } = render( { disabledFunctionTooltip="disabled" handleEmbed={handleEmbed} handleOpenInExplore={handleOpenInExplore} + handleOpenRunConfig={handleOpenRunConfig} handlePublish={vi.fn()} hasHumanInputNode={false} hasTriggerNode={false} - inputs={[]} missingStartNode={false} - onRefreshData={vi.fn()} - outputs={[]} - published={true} + published={false} publishedAt={Date.now()} + showBatchRunConfig + showRunConfig toolPublished workflowToolAvailable={false} + workflowToolIsLoading={false} + workflowToolOutdated={false} + workflowToolIsCurrentWorkspaceManager workflowToolMessage="workflow-disabled" + onConfigureWorkflowTool={vi.fn()} />, ) expect(screen.getByText('common.batchRunApp')).toHaveAttribute('data-link', 'https://example.com/app?mode=batch') + fireEvent.click(screen.getAllByRole('button', { name: 'operation.config' })[0]!) + expect(handleOpenRunConfig).toHaveBeenCalledWith('https://example.com/app') + fireEvent.click(screen.getAllByRole('button', { name: 'operation.config' })[1]!) 
+ expect(handleOpenRunConfig).toHaveBeenCalledWith('https://example.com/app?mode=batch') fireEvent.click(screen.getByText('common.openInExplore')) expect(handleOpenInExplore).toHaveBeenCalled() expect(screen.getByText('workflow-tool-configure')).toBeInTheDocument() @@ -223,17 +271,19 @@ describe('app-publisher sections', () => { disabledFunctionTooltip="disabled" handleEmbed={handleEmbed} handleOpenInExplore={handleOpenInExplore} + handleOpenRunConfig={handleOpenRunConfig} handlePublish={vi.fn()} hasHumanInputNode={false} hasTriggerNode={false} - inputs={[]} missingStartNode - onRefreshData={vi.fn()} - outputs={[]} published={false} publishedAt={Date.now()} toolPublished={false} workflowToolAvailable + workflowToolIsLoading={false} + workflowToolOutdated={false} + workflowToolIsCurrentWorkspaceManager + onConfigureWorkflowTool={vi.fn()} />, ) @@ -248,16 +298,19 @@ describe('app-publisher sections', () => { disabledFunctionButton={false} handleEmbed={handleEmbed} handleOpenInExplore={handleOpenInExplore} + handleOpenRunConfig={handleOpenRunConfig} handlePublish={vi.fn()} hasHumanInputNode={false} hasTriggerNode - inputs={[]} missingStartNode={false} - outputs={[]} published={false} publishedAt={undefined} toolPublished={false} workflowToolAvailable + workflowToolIsLoading={false} + workflowToolOutdated={false} + workflowToolIsCurrentWorkspaceManager + onConfigureWorkflowTool={vi.fn()} />, ) diff --git a/web/app/components/app/app-publisher/__tests__/suggested-action.spec.tsx b/web/app/components/app/app-publisher/__tests__/suggested-action.spec.tsx index ea199dfb78..2ca9e77abf 100644 --- a/web/app/components/app/app-publisher/__tests__/suggested-action.spec.tsx +++ b/web/app/components/app/app-publisher/__tests__/suggested-action.spec.tsx @@ -46,4 +46,47 @@ describe('SuggestedAction', () => { expect(handleClick).toHaveBeenCalledTimes(1) }) + + it('should render and trigger the trailing action button when configured', () => { + const handleActionClick = vi.fn() + + 
render( + config
, + onClick: handleActionClick, + }} + > + Configurable action + , + ) + + fireEvent.click(screen.getByRole('button', { name: 'Configure action' })) + + expect(screen.getByRole('link', { name: 'Configurable action' })).toHaveAttribute('href', 'https://example.com/docs') + expect(handleActionClick).toHaveBeenCalledTimes(1) + }) + + it('should block action button clicks when disabled', () => { + const handleActionClick = vi.fn() + + render( + config
, + onClick: handleActionClick, + }} + > + Disabled with action + , + ) + + fireEvent.click(screen.getByRole('button', { name: 'Configure action' })) + expect(handleActionClick).not.toHaveBeenCalled() + }) }) diff --git a/web/app/components/app/app-publisher/index.tsx b/web/app/components/app/app-publisher/index.tsx index fe6fe5806f..f5b2c80ae8 100644 --- a/web/app/components/app/app-publisher/index.tsx +++ b/web/app/components/app/app-publisher/index.tsx @@ -1,28 +1,40 @@ +import type { FormEvent } from 'react' import type { ModelAndParameter } from '../configuration/debug/types' +import type { WorkflowHiddenStartVariable, WorkflowLaunchInputValue } from '@/app/components/app/overview/app-card-utils' import type { CollaborationUpdate } from '@/app/components/workflow/collaboration/types/collaboration' import type { InputVar, Variable } from '@/app/components/workflow/types' import type { PublishWorkflowParams } from '@/types/workflow' import { Button } from '@langgenius/dify-ui/button' import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { toast } from '@langgenius/dify-ui/toast' -import { RiStoreLine } from '@remixicon/react' import { useSuspenseQuery } from '@tanstack/react-query' import { useKeyPress } from 'ahooks' import { + memo, + use, useCallback, - useContext, useEffect, useMemo, useState, } from 'react' import { useTranslation } from 'react-i18next' +import { WorkflowLaunchDialog } from '@/app/components/app/overview/app-card-sections' +import { + buildWorkflowLaunchUrl, + createWorkflowLaunchInitialValues, + isWorkflowLaunchInputSupported, + +} from '@/app/components/app/overview/app-card-utils' import EmbeddedModal from '@/app/components/app/overview/embedded' import { useStore as useAppStore } from '@/app/components/app/store' import { trackEvent } from '@/app/components/base/amplitude' +import { WorkflowToolDrawer } from '@/app/components/tools/workflow-tool' +import { useConfigureButton } from 
'@/app/components/tools/workflow-tool/hooks/use-configure-button' import { collaborationManager } from '@/app/components/workflow/collaboration/core/collaboration-manager' import { webSocketClient } from '@/app/components/workflow/collaboration/core/websocket-manager' import { WorkflowContext } from '@/app/components/workflow/context' +import { appDefaultIconBackground } from '@/config' import { useAsyncWindowOpen } from '@/hooks/use-async-window-open' import { useFormatTimeFromNow } from '@/hooks/use-format-time-from-now' import { AccessMode } from '@/models/access-control' @@ -57,8 +69,8 @@ export type AppPublisherProps = { debugWithMultipleModel?: boolean multipleModelConfigs?: ModelAndParameter[] /** modelAndParameter is passed when debugWithMultipleModel is true */ - onPublish?: (params?: any) => Promise | any - onRestore?: () => Promise | any + onPublish?: AppPublisherPublishHandler + onRestore?: AppPublisherRestoreHandler onToggle?: (state: boolean) => void crossAxisOffset?: number toolPublished?: boolean @@ -74,6 +86,12 @@ export type AppPublisherProps = { const PUBLISH_SHORTCUT = ['ctrl', '⇧', 'P'] +type AppPublisherPublishHandler + = | ((params?: ModelAndParameter | PublishWorkflowParams) => Promise | unknown) + | ((params?: unknown) => Promise | unknown) + +type AppPublisherRestoreHandler = () => Promise | unknown + const AppPublisher = ({ disabled = false, publishDisabled = false, @@ -100,11 +118,15 @@ const AppPublisher = ({ const [published, setPublished] = useState(false) const [open, setOpen] = useState(false) const [showAppAccessControl, setShowAppAccessControl] = useState(false) + const [workflowToolDrawerOpen, setWorkflowToolDrawerOpen] = useState(false) const [embeddingModalOpen, setEmbeddingModalOpen] = useState(false) + const [workflowLaunchDialogOpen, setWorkflowLaunchDialogOpen] = useState(false) + const [workflowLaunchTargetUrl, setWorkflowLaunchTargetUrl] = useState('') + const [workflowLaunchValues, setWorkflowLaunchValues] = 
useState>({}) const [publishingToMarketplace, setPublishingToMarketplace] = useState(false) - const workflowStore = useContext(WorkflowContext) + const workflowStore = use(WorkflowContext) const appDetail = useAppStore(state => state.appDetail) const setAppDetail = useAppStore(s => s.setAppDetail) const { data: systemFeatures } = useSuspenseQuery(systemFeaturesQueryOptions()) @@ -113,6 +135,22 @@ const AppPublisher = ({ const appURL = getPublisherAppUrl({ appBaseUrl: appBaseURL, accessToken, mode: appDetail?.mode }) const isChatApp = [AppModeEnum.CHAT, AppModeEnum.AGENT_CHAT, AppModeEnum.COMPLETION].includes(appDetail?.mode || AppModeEnum.CHAT) + const hiddenLaunchVariables = useMemo( + () => (inputs ?? []).filter(input => input.hide === true), + [inputs], + ) + const supportedWorkflowLaunchVariables = useMemo( + () => hiddenLaunchVariables.filter(isWorkflowLaunchInputSupported), + [hiddenLaunchVariables], + ) + const unsupportedWorkflowLaunchVariables = useMemo( + () => hiddenLaunchVariables.filter(variable => !isWorkflowLaunchInputSupported(variable)), + [hiddenLaunchVariables], + ) + const initialWorkflowLaunchValues = useMemo( + () => createWorkflowLaunchInitialValues(supportedWorkflowLaunchVariables), + [supportedWorkflowLaunchVariables], + ) const { data: userCanAccessApp, isLoading: isGettingUserCanAccessApp, refetch } = useGetUserCanAccessApp({ appId: appDetail?.id, enabled: false }) const { data: appAccessSubjects, isLoading: isGettingAppWhiteListSubjects } = useAppWhiteListSubjects(appDetail?.id, open && systemFeatures.webapp_auth.enabled && appDetail?.access_mode === AccessMode.SPECIFIC_GROUPS_MEMBERS) @@ -222,6 +260,31 @@ const AppPublisher = ({ } }, [appDetail, setAppDetail]) + const handleOpenWorkflowLaunchDialog = useCallback((targetUrl: string) => { + setWorkflowLaunchValues(initialWorkflowLaunchValues) + setWorkflowLaunchTargetUrl(targetUrl) + setWorkflowLaunchDialogOpen(true) + }, [initialWorkflowLaunchValues]) + + const 
handleWorkflowLaunchValueChange = useCallback((variable: string, value: WorkflowLaunchInputValue) => { + setWorkflowLaunchValues(prev => ({ + ...prev, + [variable]: value, + })) + }, []) + + const handleWorkflowLaunchConfirm = useCallback(async (event: FormEvent) => { + event.preventDefault() + + const targetUrl = await buildWorkflowLaunchUrl({ + accessibleUrl: workflowLaunchTargetUrl, + variables: supportedWorkflowLaunchVariables, + values: workflowLaunchValues, + }) + + window.open(targetUrl, '_blank') + setWorkflowLaunchDialogOpen(false) + }, [supportedWorkflowLaunchVariables, workflowLaunchTargetUrl, workflowLaunchValues]) const handlePublishToMarketplace = useCallback(async () => { if (!appDetail?.id || publishingToMarketplace) return @@ -273,6 +336,31 @@ const AppPublisher = ({ const workflowToolMessage = !hasPublishedVersion || !workflowToolAvailable ? t('common.workflowAsToolDisabledHint', { ns: 'workflow' }) : undefined + const workflowToolVisible = appDetail?.mode === AppModeEnum.WORKFLOW && !hasHumanInputNode && !hasTriggerNode + const workflowToolPublished = !!toolPublished + const closeWorkflowToolDrawer = useCallback(() => setWorkflowToolDrawerOpen(false), []) + const workflowToolIcon = useMemo(() => ({ + content: (appDetail?.icon_type === 'image' ? '🤖' : appDetail?.icon) || '🤖', + background: (appDetail?.icon_type === 'image' ? appDefaultIconBackground : appDetail?.icon_background) || appDefaultIconBackground, + }), [appDetail?.icon, appDetail?.icon_background, appDetail?.icon_type]) + const workflowTool = useConfigureButton({ + enabled: workflowToolVisible, + published: workflowToolPublished, + detailNeedUpdate: workflowToolPublished && published, + workflowAppId: appDetail?.id ?? '', + icon: workflowToolIcon, + name: appDetail?.name ?? '', + description: appDetail?.description ?? 
'', + inputs, + outputs, + handlePublish, + onRefreshData, + onConfigured: closeWorkflowToolDrawer, + }) + const openWorkflowToolDrawer = useCallback(() => { + handleOpenChange(false) + setWorkflowToolDrawerOpen(true) + }, [handleOpenChange]) const upgradeHighlightStyle = useMemo(() => ({ background: 'linear-gradient(97deg, var(--components-input-border-active-prompt-1, rgba(11, 165, 236, 0.95)) -3.64%, var(--components-input-border-active-prompt-2, rgba(21, 90, 239, 0.95)) 45.14%)', WebkitBackgroundClip: 'text', @@ -343,23 +431,27 @@ const AppPublisher = ({ handleOpenChange(false) handleOpenInExplore() }} + handleOpenRunConfig={handleOpenWorkflowLaunchDialog} handlePublish={handlePublish} hasHumanInputNode={hasHumanInputNode} hasTriggerNode={hasTriggerNode} - inputs={inputs} missingStartNode={missingStartNode} - onRefreshData={onRefreshData} - outputs={outputs} published={published} publishedAt={publishedAt} + showBatchRunConfig={hiddenLaunchVariables.length > 0 && (appDetail?.mode === AppModeEnum.WORKFLOW || appDetail?.mode === AppModeEnum.COMPLETION)} + showRunConfig={hiddenLaunchVariables.length > 0} toolPublished={toolPublished} workflowToolAvailable={workflowToolAvailable} + workflowToolIsLoading={workflowTool.isLoading} + workflowToolOutdated={workflowTool.outdated} + workflowToolIsCurrentWorkspaceManager={workflowTool.isCurrentWorkspaceManager} workflowToolMessage={workflowToolMessage} + onConfigureWorkflowTool={openWorkflowToolDrawer} /> {systemFeatures.enable_creators_platform && (
} + icon={} disabled={!publishedAt || publishingToMarketplace} onClick={handlePublishToMarketplace} > @@ -377,9 +469,29 @@ const AppPublisher = ({ onClose={() => setEmbeddingModalOpen(false)} appBaseUrl={appBaseURL} accessToken={accessToken} + hiddenInputs={hiddenLaunchVariables} /> {showAppAccessControl && { setShowAppAccessControl(false) }} />} + + {workflowToolDrawerOpen && ( + + )} ) } diff --git a/web/app/components/app/app-publisher/sections.tsx b/web/app/components/app/app-publisher/sections.tsx index 57522095ae..712312b744 100644 --- a/web/app/components/app/app-publisher/sections.tsx +++ b/web/app/components/app/app-publisher/sections.tsx @@ -8,13 +8,12 @@ import { TooltipContent, TooltipTrigger, } from '@langgenius/dify-ui/tooltip' +import { RiSettings2Line } from '@remixicon/react' import { useTranslation } from 'react-i18next' import Divider from '@/app/components/base/divider' -import { CodeBrowser } from '@/app/components/base/icons/src/vender/line/development' import Loading from '@/app/components/base/loading' import UpgradeBtn from '@/app/components/billing/upgrade-btn' import WorkflowToolConfigureButton from '@/app/components/tools/workflow-tool/configure-button' -import { appDefaultIconBackground } from '@/config' import { AppModeEnum } from '@/types/app' import ShortcutsName from '../../workflow/shortcuts-name' import PublishWithMultipleModel from './publish-with-multiple-model' @@ -46,11 +45,8 @@ type AccessSectionProps = { type ActionsSectionProps = Pick & { appDetail: { @@ -67,9 +63,16 @@ type ActionsSectionProps = Pick void handleOpenInExplore: () => void + handleOpenRunConfig?: (url: string) => void handlePublish: (params?: ModelAndParameter | PublishWorkflowParams) => Promise published: boolean + showBatchRunConfig?: boolean + showRunConfig?: boolean + workflowToolIsLoading: boolean + workflowToolOutdated: boolean + workflowToolIsCurrentWorkspaceManager: boolean workflowToolMessage?: string + onConfigureWorkflowTool: () => void } export 
const AccessModeDisplay = ({ mode }: { mode?: keyof typeof ACCESS_MODE_MAP }) => { @@ -256,18 +259,20 @@ export const PublisherActionsSection = ({ disabledFunctionTooltip, handleEmbed, handleOpenInExplore, - handlePublish, + handleOpenRunConfig, hasHumanInputNode = false, hasTriggerNode = false, - inputs, missingStartNode = false, - onRefreshData, - outputs, - published, publishedAt, + showBatchRunConfig = false, + showRunConfig = false, toolPublished, workflowToolAvailable = true, + workflowToolIsLoading, + workflowToolOutdated, + workflowToolIsCurrentWorkspaceManager, workflowToolMessage, + onConfigureWorkflowTool, }: ActionsSectionProps) => { const { t } = useTranslation() @@ -284,6 +289,13 @@ export const PublisherActionsSection = ({ disabled={disabledFunctionButton} link={appURL} icon={} + actionButton={showRunConfig + ? { + ariaLabel: t('operation.config', { ns: 'common' }), + icon: , + onClick: () => handleOpenRunConfig?.(appURL), + } + : undefined} > {t('common.runApp', { ns: 'workflow' })} @@ -296,6 +308,13 @@ export const PublisherActionsSection = ({ disabled={disabledFunctionButton} link={`${appURL}${appURL.includes('?') ? '&' : '?'}mode=batch`} icon={} + actionButton={showBatchRunConfig + ? { + ariaLabel: t('operation.config', { ns: 'common' }), + icon: , + onClick: () => handleOpenRunConfig?.(`${appURL}${appURL.includes('?') ? 
'&' : '?'}mode=batch`), + } + : undefined} > {t('common.batchRunApp', { ns: 'workflow' })} @@ -305,7 +324,7 @@ export const PublisherActionsSection = ({ } + icon={} > {t('common.embedIntoSite', { ns: 'workflow' })} @@ -340,18 +359,10 @@ export const PublisherActionsSection = ({ )} diff --git a/web/app/components/app/app-publisher/suggested-action.tsx b/web/app/components/app/app-publisher/suggested-action.tsx index db13364eb9..c1cec6f819 100644 --- a/web/app/components/app/app-publisher/suggested-action.tsx +++ b/web/app/components/app/app-publisher/suggested-action.tsx @@ -1,33 +1,93 @@ -import type { HTMLProps, PropsWithChildren } from 'react' +import type { HTMLProps, PropsWithChildren, MouseEvent as ReactMouseEvent } from 'react' import { cn } from '@langgenius/dify-ui/cn' import { RiArrowRightUpLine } from '@remixicon/react' +type SuggestedActionButton = { + ariaLabel: string + icon: React.ReactNode + onClick: (event: ReactMouseEvent) => void +} + type SuggestedActionProps = PropsWithChildren & { icon?: React.ReactNode link?: string disabled?: boolean + actionButton?: SuggestedActionButton }> -const SuggestedAction = ({ icon, link, disabled, children, className, onClick, ...props }: SuggestedActionProps) => { - const handleClick = (e: React.MouseEvent) => { - if (disabled) +const SuggestedAction = ({ + icon, + link, + disabled, + children, + className, + onClick, + actionButton, + ...props +}: SuggestedActionProps) => { + const handleClick = (event: ReactMouseEvent) => { + if (disabled) { + event.preventDefault() return - onClick?.(e) + } + + onClick?.(event) } - return ( + + const handleActionClick = (event: ReactMouseEvent) => { + if (disabled) { + event.preventDefault() + return + } + + actionButton?.onClick(event) + } + + const mainAction = ( -
{icon}
+
{icon}
{children}
- +
) + + if (!actionButton) + return mainAction + + return ( +
+ {mainAction} + +
+ ) } export default SuggestedAction diff --git a/web/app/components/app/configuration/config-var/config-modal/__tests__/form-fields.spec.tsx b/web/app/components/app/configuration/config-var/config-modal/__tests__/form-fields.spec.tsx index 7a63df3350..cdb1d17833 100644 --- a/web/app/components/app/configuration/config-var/config-modal/__tests__/form-fields.spec.tsx +++ b/web/app/components/app/configuration/config-var/config-modal/__tests__/form-fields.spec.tsx @@ -4,6 +4,29 @@ import { fireEvent, render, screen } from '@testing-library/react' import { InputVarType } from '@/app/components/workflow/types' import ConfigModalFormFields from '../form-fields' +vi.mock('react-i18next', async () => { + const React = await import('react') + return { + useTranslation: () => ({ + t: (key: string, options?: Record) => { + const ns = options?.ns as string | undefined + return ns ? `${ns}.${key}` : key + }, + i18n: { language: 'en', changeLanguage: vi.fn() }, + }), + Trans: ({ i18nKey, components }: { i18nKey: string, components?: Record }) => ( + + {i18nKey} + {components?.docLink} + + ), + } +}) + +vi.mock('@/context/i18n', () => ({ + useDocLink: () => (path?: string) => `https://docs.example.com${path || ''}`, +})) + vi.mock('@/app/components/base/file-uploader', () => ({ FileUploaderInAttachmentWrapper: ({ onChange, @@ -74,6 +97,12 @@ vi.mock('@langgenius/dify-ui/select', async (importOriginal) => { } }) +vi.mock('@langgenius/dify-ui/tooltip', () => ({ + Tooltip: ({ children }: { children: ReactNode }) =>
{children}
, + TooltipTrigger: ({ children }: { children: ReactNode }) =>
{children}
, + TooltipContent: ({ children }: { children: ReactNode }) =>
{children}
, +})) + vi.mock('../field', () => ({ default: ({ children, title }: { children: ReactNode, title: string }) => (
@@ -176,7 +205,18 @@ describe('ConfigModalFormFields', () => { expect(selectProps.payloadChangeHandlers.default).toHaveBeenCalledWith('beta') }) - it('should wire file, json schema, and visibility controls', () => { + it('should wire file, json schema, and visibility controls', async () => { + const textInputProps = createBaseProps() + const textInputView = render() + expect(screen.getByText('variableConfig.hidden')).toBeInTheDocument() + fireEvent.click(screen.getByRole('button', { name: 'variableConfig.hiddenDescription' })) + expect(await screen.findByText('variableConfig.hiddenDescription')).toBeInTheDocument() + const docLink = await screen.findByRole('link') + expect(docLink).toHaveAttribute('href', 'https://docs.example.com/use-dify/nodes/user-input#hide-and-pre-fill-input-fields') + expect(docLink).toHaveAttribute('target', '_blank') + expect(docLink).toHaveAttribute('rel', 'noopener noreferrer') + textInputView.unmount() + const singleFileProps = createBaseProps() singleFileProps.tempPayload = { ...singleFileProps.tempPayload, @@ -185,18 +225,20 @@ describe('ConfigModalFormFields', () => { allowed_file_extensions: [], allowed_file_upload_methods: ['remote_url'], } - render() + const singleFileView = render() + expect(screen.queryByText('variableConfig.hidden')).not.toBeInTheDocument() + expect(screen.queryByText('variableConfig.hiddenDescription')).not.toBeInTheDocument() fireEvent.click(screen.getByText('single-file-setting')) fireEvent.click(screen.getByText('upload-file')) fireEvent.click(screen.getAllByText('unchecked')[0]!) - fireEvent.click(screen.getAllByText('unchecked')[1]!) 
expect(singleFileProps.onFilePayloadChange).toHaveBeenCalledWith({ number_limits: 1 }) expect(singleFileProps.payloadChangeHandlers.default).toHaveBeenCalledWith(expect.objectContaining({ fileId: 'file-1', })) expect(singleFileProps.payloadChangeHandlers.required).toHaveBeenCalledWith(true) - expect(singleFileProps.payloadChangeHandlers.hide).toHaveBeenCalledWith(true) + expect(singleFileProps.payloadChangeHandlers.hide).not.toHaveBeenCalled() + singleFileView.unmount() const multiFileProps = createBaseProps() multiFileProps.tempPayload = { @@ -207,8 +249,9 @@ describe('ConfigModalFormFields', () => { allowed_file_upload_methods: ['remote_url'], } render() + expect(screen.queryByText('variableConfig.hidden')).not.toBeInTheDocument() fireEvent.click(screen.getByText('multi-file-setting')) - fireEvent.click(screen.getAllByText('upload-file')[1]!) + fireEvent.click(screen.getAllByText('upload-file')[0]!) expect(multiFileProps.onFilePayloadChange).toHaveBeenCalledWith({ number_limits: 3 }) expect(multiFileProps.payloadChangeHandlers.default).toHaveBeenCalledWith([ expect.objectContaining({ fileId: 'file-1' }), @@ -367,4 +410,23 @@ describe('ConfigModalFormFields', () => { expect(screen.getByRole('spinbutton')).toHaveValue(null) }) + + it('should disable hide checkbox when required is true and disable required when hide is true', () => { + const requiredProps = createBaseProps() + requiredProps.tempPayload = { ...requiredProps.tempPayload, type: InputVarType.textInput, required: true, hide: false } + const { unmount } = render() + + const buttons = screen.getAllByRole('button') + const hideButton = buttons.find(btn => btn.textContent === 'unchecked' && btn !== buttons[0]) + expect(hideButton).toBeDefined() + unmount() + + const hideProps = createBaseProps() + hideProps.tempPayload = { ...hideProps.tempPayload, type: InputVarType.textInput, required: false, hide: true } + render() + + const allButtons = screen.getAllByRole('button') + const checkedHideButton = 
allButtons.find(btn => btn.textContent === 'checked') + expect(checkedHideButton).toBeDefined() + }) }) diff --git a/web/app/components/app/configuration/config-var/config-modal/__tests__/index-logic.spec.tsx b/web/app/components/app/configuration/config-var/config-modal/__tests__/index-logic.spec.tsx index e6cb56f490..d32bcec755 100644 --- a/web/app/components/app/configuration/config-var/config-modal/__tests__/index-logic.spec.tsx +++ b/web/app/components/app/configuration/config-var/config-modal/__tests__/index-logic.spec.tsx @@ -25,6 +25,7 @@ vi.mock('../form-fields', () => ({ return (
{String(props.tempPayload.type)}
+
{String(props.tempPayload.hide)}
{String(props.tempPayload.label ?? '')}
{String(props.tempPayload.json_schema ?? '')}
{String(props.tempPayload.default ?? '')}
@@ -115,7 +116,7 @@ describe('ConfigModal logic', () => { }) it('should derive payload fields from mocked form-field callbacks', async () => { - renderConfigModal() + renderConfigModal(createPayload({ hide: true })) fireEvent.click(screen.getByTestId('valid-key-blur')) await waitFor(() => { @@ -138,6 +139,7 @@ describe('ConfigModal logic', () => { fireEvent.click(screen.getByTestId('type-change')) await waitFor(() => { expect(screen.getByTestId('payload-type')).toHaveTextContent(InputVarType.singleFile) + expect(screen.getByTestId('payload-hide')).toHaveTextContent('false') }) fireEvent.click(screen.getByTestId('file-payload-change')) diff --git a/web/app/components/app/configuration/config-var/config-modal/__tests__/utils.spec.ts b/web/app/components/app/configuration/config-var/config-modal/__tests__/utils.spec.ts index 1c00e1c5b2..2317868004 100644 --- a/web/app/components/app/configuration/config-var/config-modal/__tests__/utils.spec.ts +++ b/web/app/components/app/configuration/config-var/config-modal/__tests__/utils.spec.ts @@ -49,11 +49,13 @@ describe('config-modal utils', () => { const payload = createInputVar({ type: InputVarType.textInput, default: 'hello', + hide: true, }) const nextPayload = createPayloadForType(payload, InputVarType.multiFiles) expect(nextPayload.type).toBe(InputVarType.multiFiles) + expect(nextPayload.hide).toBe(false) expect(nextPayload.max_length).toBe(DEFAULT_FILE_UPLOAD_SETTING.max_length) expect(nextPayload.allowed_file_types).toEqual(DEFAULT_FILE_UPLOAD_SETTING.allowed_file_types) expect(nextPayload.default).toBe('hello') @@ -249,6 +251,24 @@ describe('config-modal utils', () => { }) }) + it('should force file inputs to stay visible when saving', () => { + const result = validateConfigModalPayload({ + tempPayload: createInputVar({ + type: InputVarType.singleFile, + hide: true, + allowed_file_types: [SupportUploadFileTypes.document], + allowed_file_extensions: [], + }), + payload: createInputVar(), + checkVariableName: () => 
true, + t, + }) + + expect(result.payloadToSave).toEqual(expect.objectContaining({ + hide: false, + })) + }) + it('should stop validation when the variable name checker rejects the payload', () => { const result = validateConfigModalPayload({ tempPayload: createInputVar({ diff --git a/web/app/components/app/configuration/config-var/config-modal/form-fields.tsx b/web/app/components/app/configuration/config-var/config-modal/form-fields.tsx index 748108e19a..4bd938c3f6 100644 --- a/web/app/components/app/configuration/config-var/config-modal/form-fields.tsx +++ b/web/app/components/app/configuration/config-var/config-modal/form-fields.tsx @@ -13,14 +13,17 @@ import { SelectValue, } from '@langgenius/dify-ui/select' import * as React from 'react' +import { Trans } from 'react-i18next' import Checkbox from '@/app/components/base/checkbox' import { FileUploaderInAttachmentWrapper } from '@/app/components/base/file-uploader' +import { Infotip } from '@/app/components/base/infotip' import Input from '@/app/components/base/input' import Textarea from '@/app/components/base/textarea' import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor' import FileUploadSetting from '@/app/components/workflow/nodes/_base/components/file-upload-setting' import { CodeLanguage } from '@/app/components/workflow/nodes/code/types' import { InputVarType, SupportUploadFileTypes } from '@/app/components/workflow/types' +import { useDocLink } from '@/context/i18n' import { TransferMethod } from '@/types/app' import ConfigSelect from '../config-select' import ConfigString from '../config-string' @@ -68,6 +71,9 @@ const ConfigModalFormFields: FC = ({ t, }) => { const { type, label, variable } = tempPayload + const isFileInput = [InputVarType.singleFile, InputVarType.multiFiles].includes(type) + const docLink = useDocLink() + const hiddenDescriptionAriaLabel = t('variableConfig.hiddenDescription', { ns: 'appDebug' }).replace(/<[^>]+>/g, '') return (
@@ -105,7 +111,7 @@ const ConfigModalFormFields: FC = ({ {type === InputVarType.textInput && ( onPayloadChange('default')(e.target.value || undefined)} placeholder={t('variableConfig.inputPlaceholder', { ns: 'appDebug' })} /> @@ -126,7 +132,7 @@ const ConfigModalFormFields: FC = ({ onPayloadChange('default')(e.target.value || undefined)} placeholder={t('variableConfig.inputPlaceholder', { ns: 'appDebug' })} /> @@ -186,7 +192,7 @@ const ConfigModalFormFields: FC = ({ )} - {[InputVarType.singleFile, InputVarType.multiFiles].includes(type) && ( + {isFileInput && ( <> = ({ )}
- onPayloadChange('required')(!tempPayload.required)} /> + onPayloadChange('required')(!tempPayload.required)} /> {t('variableConfig.required', { ns: 'appDebug' })}
-
- onPayloadChange('hide')(!tempPayload.hide)} /> - {t('variableConfig.hide', { ns: 'appDebug' })} -
+ {!isFileInput && ( +
+ onPayloadChange('hide')(!tempPayload.hide)} /> +
+ {t('variableConfig.hidden', { ns: 'appDebug' })} + + + ), + }} + /> + +
+
+ )}
) } diff --git a/web/app/components/app/configuration/config-var/config-modal/utils.ts b/web/app/components/app/configuration/config-var/config-modal/utils.ts index fdc0ac3501..e24e4b6593 100644 --- a/web/app/components/app/configuration/config-var/config-modal/utils.ts +++ b/web/app/components/app/configuration/config-var/config-modal/utils.ts @@ -88,7 +88,9 @@ export const createPayloadForType = (payload: InputVar, type: InputVarType) => { draft.default = undefined if ([InputVarType.singleFile, InputVarType.multiFiles].includes(type)) { - (Object.keys(DEFAULT_FILE_UPLOAD_SETTING) as Array).forEach((key) => { + draft.hide = false + const fileUploadSettingKeys = Object.keys(DEFAULT_FILE_UPLOAD_SETTING) as Array + fileUploadSettingKeys.forEach((key) => { if (key !== 'max_length') draft[key] = DEFAULT_FILE_UPLOAD_SETTING[key] as never }) @@ -158,38 +160,41 @@ export const validateConfigModalPayload = ({ checkVariableName, t, }: ValidateConfigModalPayloadOptions): ValidateConfigModalPayloadResult => { + const normalizedTempPayload = [InputVarType.singleFile, InputVarType.multiFiles].includes(tempPayload.type) + ? { ...tempPayload, hide: false } + : tempPayload const jsonSchemaValue = tempPayload.json_schema const schemaEmpty = isJsonSchemaEmpty(jsonSchemaValue) const normalizedJsonSchema = schemaEmpty ? undefined : jsonSchemaValue - const payloadToSave = tempPayload.type === InputVarType.jsonObject && schemaEmpty - ? { ...tempPayload, json_schema: undefined } - : tempPayload + const payloadToSave = normalizedTempPayload.type === InputVarType.jsonObject && schemaEmpty + ? { ...normalizedTempPayload, json_schema: undefined } + : normalizedTempPayload - const moreInfo = tempPayload.variable === payload?.variable + const moreInfo = normalizedTempPayload.variable === payload?.variable ? 
undefined : { type: ChangeType.changeVarName, - payload: { beforeKey: payload?.variable || '', afterKey: tempPayload.variable }, + payload: { beforeKey: payload?.variable || '', afterKey: normalizedTempPayload.variable }, } - if (!checkVariableName(tempPayload.variable)) + if (!checkVariableName(normalizedTempPayload.variable)) return {} - if (!tempPayload.label) { + if (!normalizedTempPayload.label) { return { errorMessage: t('variableConfig.errorMsg.labelNameRequired', { ns: 'appDebug' }), } } - if (tempPayload.type === InputVarType.select) { - if (!tempPayload.options?.length) { + if (normalizedTempPayload.type === InputVarType.select) { + if (!normalizedTempPayload.options?.length) { return { errorMessage: t('variableConfig.errorMsg.atLeastOneOption', { ns: 'appDebug' }), } } const duplicated = new Set() - const hasRepeatedItem = tempPayload.options.some((option) => { + const hasRepeatedItem = normalizedTempPayload.options.some((option) => { if (duplicated.has(option)) return true @@ -204,8 +209,8 @@ export const validateConfigModalPayload = ({ } } - if ([InputVarType.singleFile, InputVarType.multiFiles].includes(tempPayload.type)) { - if (!tempPayload.allowed_file_types?.length) { + if ([InputVarType.singleFile, InputVarType.multiFiles].includes(normalizedTempPayload.type)) { + if (!normalizedTempPayload.allowed_file_types?.length) { return { errorMessage: t('errorMsg.fieldRequired', { ns: 'workflow', @@ -214,7 +219,7 @@ export const validateConfigModalPayload = ({ } } - if (tempPayload.allowed_file_types.includes(SupportUploadFileTypes.custom) && !tempPayload.allowed_file_extensions?.length) { + if (normalizedTempPayload.allowed_file_types.includes(SupportUploadFileTypes.custom) && !normalizedTempPayload.allowed_file_extensions?.length) { return { errorMessage: t('errorMsg.fieldRequired', { ns: 'workflow', @@ -224,7 +229,7 @@ export const validateConfigModalPayload = ({ } } - if (tempPayload.type === InputVarType.jsonObject && !schemaEmpty && typeof 
normalizedJsonSchema === 'string') { + if (normalizedTempPayload.type === InputVarType.jsonObject && !schemaEmpty && typeof normalizedJsonSchema === 'string') { try { const schema = JSON.parse(normalizedJsonSchema) if (schema?.type !== 'object') { diff --git a/web/app/components/app/configuration/config-var/index.tsx b/web/app/components/app/configuration/config-var/index.tsx index 34c6bf5786..7c9ae5bea3 100644 --- a/web/app/components/app/configuration/config-var/index.tsx +++ b/web/app/components/app/configuration/config-var/index.tsx @@ -22,7 +22,7 @@ import { useCallback, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import { ReactSortable } from 'react-sortablejs' import { useContext } from 'use-context-selector' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' import { InputVarType } from '@/app/components/workflow/types' import ConfigContext from '@/context/debug-configuration' import { useEventEmitterContextContext } from '@/context/event-emitter' @@ -257,13 +257,9 @@ const ConfigVar: FC = ({ promptVariables, readonly, onPromptVar
{t('variableTitle', { ns: 'appDebug' })}
{!readonly && ( - - {t('variableTip', { ns: 'appDebug' })} -
- )} - /> + + {t('variableTip', { ns: 'appDebug' })} + )}
)} diff --git a/web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx b/web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx index 56f1863ec1..54ffee0600 100644 --- a/web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx +++ b/web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx @@ -2,7 +2,7 @@ import type { FC } from 'react' import { cn } from '@langgenius/dify-ui/cn' import * as React from 'react' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' type Props = { className?: string @@ -24,14 +24,9 @@ const ItemPanel: FC = ({
{icon}
{name}
- - {description} -
- )} - > - + + {description} +
{children} diff --git a/web/app/components/app/create-app-dialog/app-card/__tests__/index.spec.tsx b/web/app/components/app/create-app-dialog/app-card/__tests__/index.spec.tsx index 16971f77d5..d1b7dedac3 100644 --- a/web/app/components/app/create-app-dialog/app-card/__tests__/index.spec.tsx +++ b/web/app/components/app/create-app-dialog/app-card/__tests__/index.spec.tsx @@ -35,7 +35,7 @@ const mockApp: App = { copyright: 'Test Corp', privacy_policy: null, custom_disclaimer: null, - category: 'Assistant', + categories: ['Assistant'], position: 1, is_listed: true, install_count: 100, @@ -253,7 +253,7 @@ describe('AppCard', () => { template_id: mockApp.app_id, template_name: mockApp.app.name, template_mode: mockApp.app.mode, - template_category: mockApp.category, + template_categories: mockApp.categories, page: 'studio', }) expect(mockSetShowTryAppPanel).toHaveBeenCalledWith(true, { diff --git a/web/app/components/app/create-app-dialog/app-card/index.tsx b/web/app/components/app/create-app-dialog/app-card/index.tsx index e710e21436..27232b0350 100644 --- a/web/app/components/app/create-app-dialog/app-card/index.tsx +++ b/web/app/components/app/create-app-dialog/app-card/index.tsx @@ -35,7 +35,7 @@ const AppCard = ({ template_id: app.app_id, template_name: appBasicInfo.name, template_mode: appBasicInfo.mode, - template_category: app.category, + template_categories: app.categories, page: 'studio', }) setShowTryAppPanel?.(true, { appId: app.app_id, app }) diff --git a/web/app/components/app/create-app-dialog/app-list/__tests__/index.spec.tsx b/web/app/components/app/create-app-dialog/app-list/__tests__/index.spec.tsx index 0c6462c2f9..cd6c6b57eb 100644 --- a/web/app/components/app/create-app-dialog/app-list/__tests__/index.spec.tsx +++ b/web/app/components/app/create-app-dialog/app-list/__tests__/index.spec.tsx @@ -115,7 +115,7 @@ vi.mock('@/next/navigation', () => ({ const createAppEntry = (name: string, category: string) => ({ app_id: name, - category, + categories: 
[category], app: { id: name, name, diff --git a/web/app/components/app/create-app-dialog/app-list/index.tsx b/web/app/components/app/create-app-dialog/app-list/index.tsx index 1924de3893..b0f0b8ca59 100644 --- a/web/app/components/app/create-app-dialog/app-list/index.tsx +++ b/web/app/components/app/create-app-dialog/app-list/index.tsx @@ -74,7 +74,7 @@ const Apps = ({ const filteredByCategory = allList.filter((item) => { if (currCategory === allCategoriesEn) return true - return item.category === currCategory + return item.categories?.includes(currCategory) ?? false }) if (currentType.length === 0) return filteredByCategory diff --git a/web/app/components/app/log/__tests__/list.spec.tsx b/web/app/components/app/log/__tests__/list.spec.tsx index 25512ed689..fe589b599a 100644 --- a/web/app/components/app/log/__tests__/list.spec.tsx +++ b/web/app/components/app/log/__tests__/list.spec.tsx @@ -84,10 +84,6 @@ vi.mock('@/app/components/app/store', () => ({ }), })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children }: { children: ReactNode }) => <>{children}, -})) - vi.mock('@/app/components/base/drawer', () => ({ default: ({ children, isOpen, onClose }: { children: ReactNode, isOpen: boolean, onClose: () => void }) => ( isOpen diff --git a/web/app/components/app/overview/__tests__/app-card-sections.spec.tsx b/web/app/components/app/overview/__tests__/app-card-sections.spec.tsx index 9820e15ad8..d3f83d5d9c 100644 --- a/web/app/components/app/overview/__tests__/app-card-sections.spec.tsx +++ b/web/app/components/app/overview/__tests__/app-card-sections.spec.tsx @@ -1,8 +1,38 @@ +import type { FormEvent } from 'react' import type { AppDetailResponse } from '@/models/app' import { fireEvent, render, screen, within } from '@testing-library/react' +import { InputVarType } from '@/app/components/workflow/types' import { AccessMode } from '@/models/access-control' import { AppModeEnum } from '@/types/app' -import { AppCardAccessControlSection, 
AppCardOperations, AppCardUrlSection, createAppCardOperations } from '../app-card-sections' +import { AppCardAccessControlSection, AppCardDialogs, AppCardOperations, AppCardUrlSection, createAppCardOperations, WorkflowLaunchDialog } from '../app-card-sections' + +vi.mock('react-i18next', () => ({ + useTranslation: () => ({ + t: (key: string) => key, + }), + Trans: ({ i18nKey }: { i18nKey: string }) => {i18nKey}, +})) + +vi.mock('../settings', () => ({ + default: () =>
, +})) + +vi.mock('../embedded', () => ({ + default: () =>
, +})) + +vi.mock('../customize', () => ({ + default: () =>
, +})) + +vi.mock('../../app-access-control', () => ({ + default: ({ onClose, onConfirm }: { onClose: () => void, onConfirm: () => void }) => ( +
+ + +
+ ), +})) describe('app-card-sections', () => { const t = (key: string) => key @@ -52,6 +82,7 @@ describe('app-card-sections', () => { it('should render operation buttons and execute enabled actions', () => { const onLaunch = vi.fn() + const onLaunchConfig = vi.fn() const operations = createAppCardOperations({ operationKeys: ['launch', 'embedded'], t: t as never, @@ -68,12 +99,19 @@ describe('app-card-sections', () => { , ) fireEvent.click(screen.getByRole('button', { name: /overview\.appInfo\.launch/i })) + fireEvent.click(screen.getByRole('button', { name: /operation\.config/i })) expect(onLaunch).toHaveBeenCalledTimes(1) + expect(onLaunchConfig).toHaveBeenCalledTimes(1) expect(screen.getByRole('button', { name: /overview\.appInfo\.embedded\.entry/i })).toBeInTheDocument() }) @@ -127,4 +165,127 @@ describe('app-card-sections', () => { fireEvent.click(within(dialog).getByRole('button', { name: /operation\.confirm/i })) expect(onRegenerate).toHaveBeenCalledTimes(1) }) + + it('should disable all operations when triggerModeDisabled is true', () => { + const operations = createAppCardOperations({ + operationKeys: ['launch', 'settings'], + t: t as never, + runningStatus: true, + triggerModeDisabled: true, + onLaunch: vi.fn(), + onEmbedded: vi.fn(), + onCustomize: vi.fn(), + onSettings: vi.fn(), + onDevelop: vi.fn(), + }) + + expect(operations[0]!.disabled).toBe(true) + expect(operations[1]!.disabled).toBe(true) + }) + + it('should render WorkflowLaunchDialog and submit values', () => { + const onOpenChange = vi.fn() + const onValueChange = vi.fn() + const onSubmit = vi.fn((event: FormEvent) => { + event.preventDefault() + }) + + render( + , + ) + + expect(screen.getByText('overview.appInfo.workflowLaunchHiddenInputs.title')).toBeInTheDocument() + fireEvent.submit(screen.getByRole('button', { name: /overview\.appInfo\.launch/i }).closest('form')!) 
+ expect(onSubmit).toHaveBeenCalled() + }) + + it('should return null for WorkflowLaunchDialog when no variables are provided', () => { + const { container } = render( + , + ) + + expect(container).toBeEmptyDOMElement() + }) + + it('should render AppCardDialogs with all modals for web apps', () => { + const appInfo = { + id: 'app-1', + mode: AppModeEnum.CHAT, + enable_site: true, + enable_api: false, + site: { app_base_url: 'https://example.com', access_token: 'token-1' }, + api_base_url: 'https://api.example.com', + } as never + + render( + , + ) + + expect(screen.getByTestId('settings-modal')).toBeInTheDocument() + expect(screen.getByTestId('embedded-modal')).toBeInTheDocument() + expect(screen.getByTestId('customize-modal')).toBeInTheDocument() + expect(screen.getByTestId('access-control')).toBeInTheDocument() + }) + + it('should return null for AppCardDialogs when not an app', () => { + const { container } = render( + , + ) + + expect(container).toBeEmptyDOMElement() + }) }) diff --git a/web/app/components/app/overview/__tests__/app-card-utils.spec.ts b/web/app/components/app/overview/__tests__/app-card-utils.spec.ts index fbfcdaf955..0a6d7f7dd7 100644 --- a/web/app/components/app/overview/__tests__/app-card-utils.spec.ts +++ b/web/app/components/app/overview/__tests__/app-card-utils.spec.ts @@ -1,9 +1,22 @@ import type { AppDetailResponse } from '@/models/app' -import { BlockEnum } from '@/app/components/workflow/types' +import { BlockEnum, InputVarType } from '@/app/components/workflow/types' import { AccessMode } from '@/models/access-control' import { AppModeEnum } from '@/types/app' import { basePath } from '@/utils/var' -import { getAppCardDisplayState, getAppCardOperationKeys, hasWorkflowStartNode, isAppAccessConfigured } from '../app-card-utils' +import { + buildWorkflowLaunchUrl, + compressAndEncodeBase64, + createWorkflowLaunchInitialValues, + getAppCardDisplayState, + getAppCardOperationKeys, + getAppHiddenLaunchVariables, + getEmbeddedIframeSnippet, 
+ getEmbeddedScriptSnippet, + getWorkflowHiddenStartVariables, + hasWorkflowStartNode, + isAppAccessConfigured, + isWorkflowLaunchInputSupported, +} from '../app-card-utils' describe('app-card-utils', () => { const baseAppInfo = { @@ -33,6 +46,108 @@ describe('app-card-utils', () => { })).toBe(false) }) + it('should return hidden workflow start variables and their initial launch values', () => { + const hiddenVariables = getWorkflowHiddenStartVariables({ + graph: { + nodes: [{ + data: { + type: BlockEnum.Start, + variables: [ + { + variable: 'visible', + label: 'Visible', + type: InputVarType.textInput, + hide: false, + required: false, + }, + { + variable: 'secret', + label: 'Secret', + type: InputVarType.textInput, + hide: true, + default: 'prefilled', + required: false, + }, + { + variable: 'enabled', + label: 'Enabled', + type: InputVarType.checkbox, + hide: true, + default: true, + required: false, + }, + ], + }, + }], + }, + }) + + expect(hiddenVariables.map(variable => variable.variable)).toEqual(['secret', 'enabled']) + expect(createWorkflowLaunchInitialValues(hiddenVariables)).toEqual({ + secret: 'prefilled', + enabled: true, + }) + }) + + it('should return hidden advanced-chat launch variables from the workflow start node first', () => { + const hiddenVariables = getAppHiddenLaunchVariables({ + appInfo: { + ...baseAppInfo, + mode: AppModeEnum.ADVANCED_CHAT, + model_config: { + user_input_form: [ + { + 'text-input': { + label: 'Visible', + variable: 'visible', + required: true, + max_length: 48, + default: '', + hide: false, + }, + }, + { + checkbox: { + label: 'Hidden Toggle', + variable: 'hidden_toggle', + required: false, + default: true, + hide: true, + }, + }, + ], + }, + } as AppDetailResponse, + currentWorkflow: { + graph: { + nodes: [{ + data: { + type: BlockEnum.Start, + variables: [ + { + variable: 'start_secret', + label: 'Start Secret', + type: InputVarType.textInput, + hide: true, + default: 'from-start', + required: false, + }, + ], + }, + 
}], + }, + }, + }) + + expect(hiddenVariables).toEqual([ + expect.objectContaining({ + variable: 'start_secret', + type: InputVarType.textInput, + default: 'from-start', + }), + ]) + }) + it('should build the display state for a published web app', () => { const state = getAppCardDisplayState({ appInfo: baseAppInfo, @@ -104,4 +219,108 @@ describe('app-card-utils', () => { isCurrentWorkspaceEditor: false, })).toEqual(['launch', 'embedded', 'customize']) }) + + it('should build a workflow launch URL with serialized parameters', async () => { + const url = await buildWorkflowLaunchUrl({ + accessibleUrl: 'https://example.com/app/workflow/token-1', + variables: [ + { variable: 'name', label: 'Name', type: InputVarType.textInput, hide: true, required: false }, + { variable: 'enabled', label: 'Enabled', type: InputVarType.checkbox, hide: true, required: false }, + ], + values: { name: 'Alice', enabled: true }, + }) + + const parsed = new URL(url) + expect(parsed.searchParams.get('name')).toBe('Alice') + expect(parsed.searchParams.get('enabled')).toBe('true') + }) + + it('should serialize checkbox false and empty string values in launch URL', async () => { + const url = await buildWorkflowLaunchUrl({ + accessibleUrl: 'https://example.com/app/workflow/token-1', + variables: [ + { variable: 'flag', label: 'Flag', type: InputVarType.checkbox, hide: true, required: false }, + { variable: 'empty', label: 'Empty', type: InputVarType.textInput, hide: true, required: false }, + ], + values: { flag: false, empty: '' }, + }) + + const parsed = new URL(url) + expect(parsed.searchParams.get('flag')).toBe('false') + expect(parsed.searchParams.get('empty')).toBe('') + }) + + it('should generate an iframe snippet with the provided URL', () => { + const snippet = getEmbeddedIframeSnippet('https://example.com/chatbot/token-1') + expect(snippet).toContain('src="https://example.com/chatbot/token-1"') + expect(snippet).toContain('frameborder="0"') + 
expect(snippet).toContain('allow="microphone"') + }) + + it('should generate an embedded script snippet with inputs', () => { + const snippet = getEmbeddedScriptSnippet({ + url: 'https://example.com', + token: 'abc123', + primaryColor: '#FF0000', + isTestEnv: true, + inputValues: { name: 'Alice', count: '5' }, + }) + + expect(snippet).toContain('token: \'abc123\'') + expect(snippet).toContain('isDev: true') + expect(snippet).toContain('name: "Alice"') + expect(snippet).toContain('count: "5"') + expect(snippet).toContain('background-color: #FF0000') + }) + + it('should generate an embedded script snippet with empty inputs comment', () => { + const snippet = getEmbeddedScriptSnippet({ + url: 'https://example.com', + token: 'abc123', + primaryColor: '#1C64F2', + inputValues: {}, + }) + + expect(snippet).toContain('// You can define the inputs from the Start node here') + expect(snippet).not.toContain('isDev: true') + }) + + it('should compress and encode base64 using CompressionStream when available', async () => { + const result = await compressAndEncodeBase64('hello') + expect(typeof result).toBe('string') + expect(result.length).toBeGreaterThan(0) + }) + + it('should fallback to plain base64 when CompressionStream is unavailable', async () => { + const original = globalThis.CompressionStream + // @ts-expect-error remove for test + delete globalThis.CompressionStream + + const result = await compressAndEncodeBase64('hello') + expect(result).toBe(btoa('hello')) + + globalThis.CompressionStream = original + }) + + it('should identify supported workflow launch input types', () => { + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.textInput, hide: true, required: false })).toBe(true) + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.paragraph, hide: true, required: false })).toBe(true) + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.select, hide: true, 
required: false })).toBe(true) + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.number, hide: true, required: false })).toBe(true) + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.checkbox, hide: true, required: false })).toBe(true) + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.json, hide: true, required: false })).toBe(true) + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.jsonObject, hide: true, required: false })).toBe(true) + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.url, hide: true, required: false })).toBe(true) + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.files, hide: true, required: false })).toBe(false) + expect(isWorkflowLaunchInputSupported({ variable: 'v', label: 'V', type: InputVarType.singleFile, hide: true, required: false })).toBe(false) + }) + + it('should coerce numeric defaults to string in createWorkflowLaunchInitialValues', () => { + const result = createWorkflowLaunchInitialValues([ + { variable: 'count', label: 'Count', type: InputVarType.number, hide: true, required: false, default: 42 }, + { variable: 'empty', label: 'Empty', type: InputVarType.textInput, hide: true, required: false }, + ]) + + expect(result).toEqual({ count: '42', empty: '' }) + }) }) diff --git a/web/app/components/app/overview/__tests__/app-card.spec.tsx b/web/app/components/app/overview/__tests__/app-card.spec.tsx index 2f730ad278..1e9ba71a4f 100644 --- a/web/app/components/app/overview/__tests__/app-card.spec.tsx +++ b/web/app/components/app/overview/__tests__/app-card.spec.tsx @@ -1,7 +1,8 @@ -import type { ReactElement, ReactNode } from 'react' +import type { ReactElement } from 'react' import type { AppDetailResponse } from '@/models/app' import { fireEvent, screen, waitFor } from '@testing-library/react' import { 
renderWithSystemFeatures } from '@/__tests__/utils/mock-system-features' +import { InputVarType } from '@/app/components/workflow/types' import { AccessMode } from '@/models/access-control' import { AppModeEnum } from '@/types/app' import { basePath } from '@/utils/var' @@ -17,7 +18,7 @@ const mockSetAppDetail = vi.fn() const mockOnChangeStatus = vi.fn() const mockOnGenerateCode = vi.fn() -let mockWorkflow: { graph?: { nodes?: Array<{ data?: { type?: string } }> } } | null = null +let mockWorkflow: { graph?: { nodes?: Array<{ data?: { type?: string, variables?: Array> } }> } } | null = null let mockAccessSubjects: { groups?: unknown[], members?: unknown[] } = { groups: [], members: [] } let mockAppDetail: AppDetailResponse | undefined @@ -25,6 +26,7 @@ vi.mock('react-i18next', () => ({ useTranslation: () => ({ t: (key: string) => key, }), + Trans: ({ i18nKey }: { i18nKey?: string }) => i18nKey ?? null, })) vi.mock('@/context/app-context', () => ({ @@ -96,15 +98,6 @@ vi.mock('../../app-access-control', () => ({ ), })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children, popupContent }: { children: ReactNode, popupContent?: ReactNode }) => ( -
- {children} - {popupContent} -
- ), -})) - const mockWindowOpen = vi.fn() Object.defineProperty(window, 'open', { writable: true, @@ -164,6 +157,182 @@ describe('AppCard', () => { expect(mockWindowOpen).toHaveBeenCalledWith(`https://example.com${basePath}/chat/access-token`, '_blank') }) + it('should open the workflow web app directly when launch is clicked even with hidden inputs', () => { + mockWorkflow = { + graph: { + nodes: [{ + data: { + type: 'start', + variables: [ + { + variable: 'secret', + label: 'Secret', + type: InputVarType.textInput, + hide: true, + required: true, + default: '', + }, + ], + }, + }], + }, + } + + render( + , + ) + + fireEvent.click(screen.getByText('overview.appInfo.launch')) + + expect(mockWindowOpen).toHaveBeenCalledWith( + `https://example.com${basePath}/workflow/access-token`, + '_blank', + ) + expect(screen.queryByText('overview.appInfo.workflowLaunchHiddenInputs.title')).not.toBeInTheDocument() + }) + + it('should collect hidden workflow inputs from the config action before launching the workflow web app', async () => { + mockWorkflow = { + graph: { + nodes: [{ + data: { + type: 'start', + variables: [ + { + variable: 'secret', + label: 'Secret', + type: InputVarType.textInput, + hide: true, + required: true, + default: '', + }, + ], + }, + }], + }, + } + + render( + , + ) + + fireEvent.click(screen.getByRole('button', { name: 'operation.config' })) + + expect(screen.getByText('overview.appInfo.workflowLaunchHiddenInputs.title')).toBeInTheDocument() + + fireEvent.change(screen.getByLabelText('Secret'), { + target: { value: 'top-secret' }, + }) + fireEvent.click(screen.getByRole('button', { name: 'overview.appInfo.launch' })) + + await waitFor(() => { + expect(mockWindowOpen).toHaveBeenCalledWith( + `https://example.com${basePath}/workflow/access-token?secret=${encodeURIComponent('top-secret')}`, + '_blank', + ) + }) + }) + + it('should open the chat web app directly when launch is clicked even with hidden inputs', () => { + mockWorkflow = { + graph: { + 
nodes: [{ + data: { + type: 'start', + variables: [ + { + variable: 'chat_secret', + label: 'Chat Secret', + type: InputVarType.textInput, + hide: true, + required: true, + default: '', + }, + ], + }, + }], + }, + } + + render( + , + ) + + fireEvent.click(screen.getByText('overview.appInfo.launch')) + + expect(mockWindowOpen).toHaveBeenCalledWith( + `https://example.com${basePath}/chat/access-token`, + '_blank', + ) + expect(screen.queryByText('overview.appInfo.workflowLaunchHiddenInputs.title')).not.toBeInTheDocument() + }) + + it('should collect hidden chatflow inputs from the config action before launching the chat web app', async () => { + mockWorkflow = { + graph: { + nodes: [{ + data: { + type: 'start', + variables: [ + { + variable: 'chat_secret', + label: 'Chat Secret', + type: InputVarType.textInput, + hide: true, + required: true, + default: '', + }, + ], + }, + }], + }, + } + + render( + , + ) + + fireEvent.click(screen.getByRole('button', { name: 'operation.config' })) + + expect(screen.getByText('overview.appInfo.workflowLaunchHiddenInputs.title')).toBeInTheDocument() + + fireEvent.change(screen.getByLabelText('Chat Secret'), { + target: { value: 'chat-secret' }, + }) + fireEvent.click(screen.getByRole('button', { name: 'overview.appInfo.launch' })) + + await waitFor(() => { + expect(mockWindowOpen).toHaveBeenCalledWith( + `https://example.com${basePath}/chat/access-token?chat_secret=${encodeURIComponent('chat-secret')}`, + '_blank', + ) + }) + }) + it('should show the access-control not-set badge when specific access has no subjects', () => { render( { }) it('should report refresh failures from access control updates', async () => { - const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}) + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => { }) mockFetchAppDetailDirect.mockRejectedValueOnce(new Error('refresh failed')) render( diff --git 
a/web/app/components/app/overview/__tests__/workflow-hidden-input-fields.spec.tsx b/web/app/components/app/overview/__tests__/workflow-hidden-input-fields.spec.tsx new file mode 100644 index 0000000000..309df540a6 --- /dev/null +++ b/web/app/components/app/overview/__tests__/workflow-hidden-input-fields.spec.tsx @@ -0,0 +1,214 @@ +import { fireEvent, render, screen } from '@testing-library/react' +import { InputVarType } from '@/app/components/workflow/types' +import WorkflowHiddenInputFields from '../workflow-hidden-input-fields' + +describe('WorkflowHiddenInputFields', () => { + const onValueChange = vi.fn() + + beforeEach(() => { + vi.clearAllMocks() + }) + + it('should render a text input with label and placeholder', () => { + render( + , + ) + + const input = screen.getByLabelText('Full Name') + expect(input).toHaveValue('Alice') + + fireEvent.change(input, { target: { value: 'Bob' } }) + expect(onValueChange).toHaveBeenCalledWith('name', 'Bob') + }) + + it('should render a number input for number-typed variables', () => { + render( + , + ) + + const input = screen.getByLabelText('Count') + expect(input).toHaveAttribute('type', 'number') + + fireEvent.change(input, { target: { value: '10' } }) + expect(onValueChange).toHaveBeenCalledWith('count', '10') + }) + + it('should render a checkbox input without a separate label element above', () => { + render( + , + ) + + const checkbox = screen.getByRole('checkbox') + expect(checkbox).toBeChecked() + expect(screen.getByText('Enable Feature')).toBeInTheDocument() + + fireEvent.click(checkbox) + expect(onValueChange).toHaveBeenCalledWith('enabled', false) + }) + + it('should render a select dropdown for select-typed variables', () => { + render( + , + ) + + expect(screen.getByRole('combobox', { name: 'Color' })).toBeInTheDocument() + }) + + it('should render a textarea for paragraph-typed variables', () => { + render( + , + ) + + const textarea = screen.getByPlaceholderText('Description') + 
expect(textarea).toHaveValue('Hello world') + + fireEvent.change(textarea, { target: { value: 'Updated' } }) + expect(onValueChange).toHaveBeenCalledWith('description', 'Updated') + }) + + it('should render a textarea for json-typed variables', () => { + render( + , + ) + + const textarea = screen.getByPlaceholderText('Config JSON') + expect(textarea).toHaveValue('{"key": "value"}') + }) + + it('should render a textarea for jsonObject-typed variables', () => { + render( + , + ) + + const textarea = screen.getByPlaceholderText('Schema') + expect(textarea).toHaveValue('{}') + }) + + it('should use the variable key as label when label is not a string', () => { + render( + , + ) + + expect(screen.getByText('my_var')).toBeInTheDocument() + }) + + it('should use the custom fieldIdPrefix for element ids', () => { + const { container } = render( + , + ) + + expect(container.querySelector('#custom-prefix-token')).toBeInTheDocument() + }) + + it('should render empty string for non-string fieldValue in text inputs', () => { + render( + , + ) + + const input = screen.getByLabelText('Flag') + expect(input).toHaveValue('') + }) +}) diff --git a/web/app/components/app/overview/app-card-sections.tsx b/web/app/components/app/overview/app-card-sections.tsx index 8fef355f34..8db5193f2d 100644 --- a/web/app/components/app/overview/app-card-sections.tsx +++ b/web/app/components/app/overview/app-card-sections.tsx @@ -1,7 +1,11 @@ /* eslint-disable react-refresh/only-export-components */ import type { TFunction } from 'i18next' -import type { ComponentType, ReactNode } from 'react' -import type { OverviewOperationKey } from './app-card-utils' +import type { ComponentType, FormEvent, ReactNode } from 'react' +import type { + OverviewOperationKey, + WorkflowHiddenStartVariable, + WorkflowLaunchInputValue, +} from './app-card-utils' import type { ConfigParams } from './settings' import type { AppDetailResponse } from '@/models/app' import type { AppSSO } from '@/types/app' @@ -15,12 +19,19 
@@ import { AlertDialogTitle, } from '@langgenius/dify-ui/alert-dialog' import { Button } from '@langgenius/dify-ui/button' +import { + Dialog, + DialogContent, + DialogDescription, + DialogTitle, +} from '@langgenius/dify-ui/dialog' import { Tooltip, TooltipContent, TooltipTrigger, } from '@langgenius/dify-ui/tooltip' -import { RiArrowRightSLine, RiBookOpenLine, RiBuildingLine, RiEqualizer2Line, RiExternalLinkLine, RiGlobalLine, RiLockLine, RiPaintBrushLine, RiVerifiedBadgeLine, RiWindowLine } from '@remixicon/react' +import { RiArrowRightSLine, RiBookOpenLine, RiBuildingLine, RiEqualizer2Line, RiExternalLinkLine, RiGlobalLine, RiLockLine, RiPaintBrushLine, RiSettings2Line, RiVerifiedBadgeLine, RiWindowLine } from '@remixicon/react' +import { Trans } from 'react-i18next' import CopyFeedback from '@/app/components/base/copy-feedback' import Divider from '@/app/components/base/divider' import ShareQRCode from '@/app/components/base/qrcode' @@ -31,6 +42,7 @@ import CustomizeModal from './customize' import EmbeddedModal from './embedded' import SettingsModal from './settings' import style from './style.module.css' +import WorkflowHiddenInputFields from './workflow-hidden-input-fields' type AppInfo = AppDetailResponse & Partial @@ -50,6 +62,12 @@ type AppCardOperation = { onClick: () => void } +type LaunchConfigAction = { + label: string + disabled: boolean + onClick: () => void +} + const OPERATION_ICON_MAP: Record = { launch: RiExternalLinkLine, embedded: RiWindowLine, @@ -96,6 +114,65 @@ const MaybeTooltip = ({ ) } +export const WorkflowLaunchDialog = ({ + t, + open, + hiddenVariables, + unsupportedVariables, + values, + onOpenChange, + onValueChange, + onSubmit, +}: { + t: TFunction + open: boolean + hiddenVariables: WorkflowHiddenStartVariable[] + unsupportedVariables: WorkflowHiddenStartVariable[] + values: Record + onOpenChange: (open: boolean) => void + onValueChange: (variable: string, value: WorkflowLaunchInputValue) => void + onSubmit: (event: FormEvent) => 
void +}) => { + if (!hiddenVariables.length && !unsupportedVariables.length) + return null + + return ( + + +
+ + {t('overview.appInfo.workflowLaunchHiddenInputs.title', { ns: 'appOverview' })} + + + }} + /> + +
+
+
+ +
+
+ + +
+
+
+
+ ) +} + export const createAppCardOperations = ({ operationKeys, t, @@ -251,20 +328,15 @@ export const AppCardAccessControlSection = ({ export const AppCardOperations = ({ t, operations, + launchConfigAction, }: { t: TFunction operations: AppCardOperation[] + launchConfigAction?: LaunchConfigAction }) => ( <> - {operations.map(({ key, label, Icon, disabled, onClick }) => ( -
- - ))} + ) + + if (key === 'launch' && launchConfigAction) { + return ( + + + + ) + } + + return ( + + ) + })} ) @@ -295,6 +431,7 @@ export const AppCardDialogs = ({ onCloseAccessControl, onSaveSiteConfig, onConfirmAccessControl, + hiddenInputs, }: { isApp: boolean appInfo: AppInfo @@ -310,6 +447,7 @@ export const AppCardDialogs = ({ onCloseAccessControl: () => void onSaveSiteConfig?: (params: ConfigParams) => Promise onConfirmAccessControl: () => Promise + hiddenInputs?: WorkflowHiddenStartVariable[] }) => { if (!isApp) return null @@ -329,6 +467,7 @@ export const AppCardDialogs = ({ onClose={onCloseEmbedded} appBaseUrl={appInfo.site?.app_base_url} accessToken={appInfo.site?.access_token} + hiddenInputs={hiddenInputs} /> type AppInfo = AppDetailResponse & Partial @@ -16,6 +23,7 @@ type WorkflowLike = { nodes?: Array<{ data?: { type?: string + variables?: InputVar[] } }> } @@ -42,10 +50,173 @@ const getCardAppMode = (mode: AppModeEnum) => { return (mode !== AppModeEnum.COMPLETION && mode !== AppModeEnum.WORKFLOW) ? AppModeEnum.CHAT : mode } +const SUPPORTED_WORKFLOW_LAUNCH_INPUT_TYPES = new Set([ + InputVarType.textInput, + InputVarType.paragraph, + InputVarType.select, + InputVarType.number, + InputVarType.checkbox, + InputVarType.json, + InputVarType.jsonObject, + InputVarType.url, +]) + +const coerceWorkflowLaunchDefaultValue = (variable: WorkflowHiddenStartVariable): WorkflowLaunchInputValue => { + if (variable.type === InputVarType.checkbox) { + if (typeof variable.default === 'boolean') + return variable.default + + return String(variable.default).toLowerCase() === 'true' + } + + if (typeof variable.default === 'number') + return String(variable.default) + + return String(variable.default ?? '') +} + export const hasWorkflowStartNode = (currentWorkflow: WorkflowLike) => { return currentWorkflow?.graph?.nodes?.some(node => node.data?.type === BlockEnum.Start) ?? 
false } +export const getWorkflowHiddenStartVariables = (currentWorkflow: WorkflowLike): WorkflowHiddenStartVariable[] => { + const startNode = currentWorkflow?.graph?.nodes?.find(node => node.data?.type === BlockEnum.Start) + return (startNode?.data?.variables ?? []).filter(variable => variable.hide === true) +} + +export const getAppHiddenLaunchVariables = ({ + appInfo, + currentWorkflow, +}: { + appInfo: AppInfo + currentWorkflow: WorkflowLike +}) => { + if ([AppModeEnum.WORKFLOW, AppModeEnum.ADVANCED_CHAT].includes(appInfo.mode)) + return getWorkflowHiddenStartVariables(currentWorkflow) +} + +export const isWorkflowLaunchInputSupported = (variable: WorkflowHiddenStartVariable) => { + return SUPPORTED_WORKFLOW_LAUNCH_INPUT_TYPES.has(variable.type) +} + +export const createWorkflowLaunchInitialValues = (variables: WorkflowHiddenStartVariable[]) => { + return variables.reduce>((acc, variable) => { + acc[variable.variable] = coerceWorkflowLaunchDefaultValue(variable) + return acc + }, {}) +} + +export const buildWorkflowLaunchUrl = async ({ + accessibleUrl, + variables, + values, +}: { + accessibleUrl: string + variables: WorkflowHiddenStartVariable[] + values: Record +}) => { + const targetUrl = new URL(accessibleUrl, window.location.origin) + variables.forEach((variable) => { + const rawValue = values[variable.variable] + const serializedValue = variable.type === InputVarType.checkbox + ? String(Boolean(rawValue)) + : String(rawValue ?? '') + + targetUrl.searchParams.set(variable.variable, serializedValue) + }) + + return targetUrl.toString() +} + +export const getEmbeddedIframeSnippet = (iframeUrl: string) => + `` + +const getScriptInputsContent = (values: Record) => { + const entries = Object.entries(values) + + if (!entries.length) { + return `{ + // You can define the inputs from the Start node here + // key is the variable name + // e.g. 
+ // name: "NAME" + }` + } + + return `{ +${entries.map(([key, value]) => ` ${key}: ${JSON.stringify(value)},`).join('\n')} + }` +} + +export const getEmbeddedScriptSnippet = ({ + url, + token, + primaryColor, + isTestEnv, + inputValues, +}: { + url: string + token: string + primaryColor: string + isTestEnv?: boolean + inputValues: Record +}) => + ` + +` + +export const getChromePluginContent = (iframeUrl: string) => `ChatBot URL: ${iframeUrl}` + +export const compressAndEncodeBase64 = async (input: string) => { + const uint8Array = new TextEncoder().encode(input) + if (typeof CompressionStream === 'undefined') + return btoa(String.fromCharCode(...uint8Array)) + + const compressedStream = new Response( + new Blob([uint8Array]) + .stream() + .pipeThrough(new CompressionStream('gzip')), + ).arrayBuffer() + const compressedUint8Array = new Uint8Array(await compressedStream) + return btoa(String.fromCharCode(...compressedUint8Array)) +} + export const getAppCardDisplayState = ({ appInfo, cardType, diff --git a/web/app/components/app/overview/app-card.tsx b/web/app/components/app/overview/app-card.tsx index b7ec4a2d81..9b1fc3a032 100644 --- a/web/app/components/app/overview/app-card.tsx +++ b/web/app/components/app/overview/app-card.tsx @@ -1,4 +1,5 @@ 'use client' +import type { WorkflowLaunchInputValue } from './app-card-utils' import type { ConfigParams } from './settings' import type { AppDetailResponse } from '@/models/app' import type { AppSSO } from '@/types/app' @@ -28,11 +29,16 @@ import { AppCardOperations, AppCardUrlSection, createAppCardOperations, + WorkflowLaunchDialog, } from './app-card-sections' import { + buildWorkflowLaunchUrl, + createWorkflowLaunchInitialValues, getAppCardDisplayState, getAppCardOperationKeys, + getAppHiddenLaunchVariables, isAppAccessConfigured, + isWorkflowLaunchInputSupported, } from './app-card-utils' export type IAppCardProps = { @@ -63,7 +69,8 @@ function AppCard({ const router = useRouter() const pathname = usePathname() 
const { isCurrentWorkspaceManager, isCurrentWorkspaceEditor } = useAppContext() - const { data: currentWorkflow } = useAppWorkflow(appInfo.mode === AppModeEnum.WORKFLOW ? appInfo.id : '') + const shouldFetchWorkflow = appInfo.mode === AppModeEnum.WORKFLOW || appInfo.mode === AppModeEnum.ADVANCED_CHAT + const { data: currentWorkflow } = useAppWorkflow(shouldFetchWorkflow ? appInfo.id : '') const docLink = useDocLink() const appDetail = useAppStore(state => state.appDetail) const setAppDetail = useAppStore(state => state.setAppDetail) @@ -73,6 +80,8 @@ function AppCard({ const [genLoading, setGenLoading] = useState(false) const [showConfirmDelete, setShowConfirmDelete] = useState(false) const [showAccessControl, setShowAccessControl] = useState(false) + const [showWorkflowLaunchDialog, setShowWorkflowLaunchDialog] = useState(false) + const [workflowLaunchValues, setWorkflowLaunchValues] = useState>({}) const { t } = useTranslation() const { data: systemFeatures } = useSuspenseQuery(systemFeaturesQueryOptions()) const { data: appAccessSubjects } = useAppWhiteListSubjects( @@ -98,6 +107,25 @@ function AppCard({ () => isAppAccessConfigured(appDetail, appAccessSubjects), [appAccessSubjects, appDetail], ) + const hiddenLaunchVariables = useMemo( + () => getAppHiddenLaunchVariables({ + appInfo, + currentWorkflow, + }) || [], + [appInfo, currentWorkflow], + ) + const supportedWorkflowLaunchVariables = useMemo( + () => hiddenLaunchVariables.filter(isWorkflowLaunchInputSupported), + [hiddenLaunchVariables], + ) + const unsupportedWorkflowLaunchVariables = useMemo( + () => hiddenLaunchVariables.filter(variable => !isWorkflowLaunchInputSupported(variable)), + [hiddenLaunchVariables], + ) + const initialWorkflowLaunchValues = useMemo( + () => createWorkflowLaunchInitialValues(supportedWorkflowLaunchVariables), + [supportedWorkflowLaunchVariables], + ) const onGenCode = async () => { if (!onGenerateCode) @@ -139,6 +167,31 @@ function AppCard({ window.open(cardState.accessibleUrl, 
'_blank') }, [cardState.accessibleUrl]) + const handleOpenWorkflowLaunchDialog = useCallback(() => { + setWorkflowLaunchValues(initialWorkflowLaunchValues) + setShowWorkflowLaunchDialog(true) + }, [initialWorkflowLaunchValues]) + + const handleWorkflowLaunchValueChange = useCallback((variable: string, value: WorkflowLaunchInputValue) => { + setWorkflowLaunchValues(prev => ({ + ...prev, + [variable]: value, + })) + }, []) + + const handleWorkflowLaunchConfirm = useCallback(async (event: React.FormEvent) => { + event.preventDefault() + + const targetUrl = await buildWorkflowLaunchUrl({ + accessibleUrl: cardState.accessibleUrl, + variables: supportedWorkflowLaunchVariables, + values: workflowLaunchValues, + }) + + window.open(targetUrl, '_blank') + setShowWorkflowLaunchDialog(false) + }, [cardState.accessibleUrl, supportedWorkflowLaunchVariables, workflowLaunchValues]) + const handleOpenCustomize = useCallback(() => { setShowCustomizeModal(true) }, []) @@ -304,7 +357,17 @@ function AppCard({ {!cardState.isMinimalState && (
{!isApp && } - + 0 + ? { + label: t('operation.config', { ns: 'common' }), + disabled: triggerModeDisabled || !cardState.runningStatus, + onClick: handleOpenWorkflowLaunchDialog, + } + : undefined} + />
)}
@@ -323,6 +386,17 @@ function AppCard({ onCloseAccessControl={() => setShowAccessControl(false)} onSaveSiteConfig={onSaveSiteConfig} onConfirmAccessControl={handleAccessControlUpdate} + hiddenInputs={hiddenLaunchVariables} + /> +
) diff --git a/web/app/components/app/overview/embedded/__tests__/index.spec.tsx b/web/app/components/app/overview/embedded/__tests__/index.spec.tsx index 0a843c26fd..a6e391cb0e 100644 --- a/web/app/components/app/overview/embedded/__tests__/index.spec.tsx +++ b/web/app/components/app/overview/embedded/__tests__/index.spec.tsx @@ -1,10 +1,11 @@ import type { SiteInfo } from '@/models/share' -import { fireEvent, render, screen } from '@testing-library/react' +import { fireEvent, render, screen, waitFor } from '@testing-library/react' import copy from 'copy-to-clipboard' import * as React from 'react' - import { act } from 'react' -import { afterAll, afterEach, describe, expect, it, vi } from 'vitest' + +import { afterAll, afterEach, beforeAll, describe, expect, it, vi } from 'vitest' +import { InputVarType } from '@/app/components/workflow/types' import Embedded from '../index' vi.mock('../style.module.css', () => ({ @@ -46,6 +47,7 @@ vi.mock('@/context/app-context', () => ({ })) const mockWindowOpen = vi.spyOn(window, 'open').mockImplementation(() => null) const mockedCopy = vi.mocked(copy) +const originalCompressionStream = globalThis.CompressionStream const siteInfo: SiteInfo = { title: 'test site', @@ -70,6 +72,22 @@ const getCopyButton = () => { } describe('Embedded', () => { + beforeAll(() => { + class MockCompressionStream { + readable: ReadableStream + writable: WritableStream + + constructor() { + const transformStream = new TransformStream() + this.readable = transformStream.readable + this.writable = transformStream.writable + } + } + + // @ts-expect-error test polyfill + globalThis.CompressionStream = MockCompressionStream + }) + afterEach(() => { vi.clearAllMocks() mockWindowOpen.mockClear() @@ -77,6 +95,7 @@ describe('Embedded', () => { afterAll(() => { mockWindowOpen.mockRestore() + globalThis.CompressionStream = originalCompressionStream }) it('builds theme and copies iframe snippet', async () => { @@ -84,14 +103,20 @@ describe('Embedded', () => { 
render() }) + await waitFor(() => { + expect(screen.getByText((content, node) => node?.tagName.toLowerCase() === 'pre' && content.includes('/chatbot/token'))).toBeInTheDocument() + }) + const actionButton = getCopyButton() const innerDiv = actionButton.querySelector('div') - act(() => { + await act(async () => { fireEvent.click(innerDiv ?? actionButton) }) expect(mockThemeBuilder.buildTheme).toHaveBeenCalledWith(siteInfo.chat_color_theme, siteInfo.chat_color_theme_inverted) - expect(mockedCopy).toHaveBeenCalledWith(expect.stringContaining('/chatbot/token')) + await waitFor(() => { + expect(mockedCopy).toHaveBeenCalledWith(expect.stringContaining('/chatbot/token')) + }) }) it('opens chrome plugin store link when chrome option selected', async () => { @@ -116,4 +141,106 @@ describe('Embedded', () => { 'noopener,noreferrer', ) }) + + it('keeps hidden inputs collapsed by default and updates iframe and script content when values change', async () => { + render( + , + ) + + expect(screen.queryByLabelText('Secret')).not.toBeInTheDocument() + + await act(async () => { + fireEvent.click(screen.getByText('appOverview.overview.appInfo.embedded.hiddenInputs.title').closest('button')!) + }) + + await waitFor(() => { + expect(screen.getByLabelText('Secret')).toBeInTheDocument() + }) + + await act(async () => { + fireEvent.change(screen.getByLabelText('Secret'), { + target: { value: 'top-secret' }, + }) + }) + + expect(document.querySelector('pre')?.textContent ?? '').toContain('/chatbot/token') + + await waitFor(() => { + const codeBlock = document.querySelector('pre') + expect(codeBlock?.textContent ?? '').toContain('/chatbot/token?secret=dG9wLXNlY3JldA%3D%3D') + }) + + const optionButtons = document.body.querySelectorAll('[class*="option"]') + act(() => { + fireEvent.click(optionButtons[1]!) + }) + + await waitFor(() => { + const codeBlock = document.querySelector('pre') + expect(codeBlock?.textContent ?? 
'').toContain('secret: "top-secret"') + }) + }) + + it('copies script content when scripts option is selected', async () => { + await act(async () => { + render() + }) + + const optionButtons = document.body.querySelectorAll('[class*="option"]') + act(() => { + fireEvent.click(optionButtons[1]!) + }) + + await waitFor(() => { + const codeBlock = document.querySelector('pre') + expect(codeBlock?.textContent ?? '').toContain('token: \'token\'') + }) + + const actionButton = getCopyButton() + const innerDiv = actionButton.querySelector('div') + await act(async () => { + fireEvent.click(innerDiv ?? actionButton) + }) + + await waitFor(() => { + expect(mockedCopy).toHaveBeenCalledWith(expect.stringContaining('token: \'token\'')) + }) + }) + + it('copies chrome plugin URL (without prefix) when chromePlugin option is selected', async () => { + await act(async () => { + render() + }) + + const optionButtons = document.body.querySelectorAll('[class*="option"]') + act(() => { + fireEvent.click(optionButtons[2]!) + }) + + await waitFor(() => { + const codeBlock = document.querySelector('pre') + expect(codeBlock?.textContent ?? '').toContain('ChatBot URL:') + }) + + const actionButton = getCopyButton() + const innerDiv = actionButton.querySelector('div') + await act(async () => { + fireEvent.click(innerDiv ?? 
actionButton) + }) + + await waitFor(() => { + expect(mockedCopy).toHaveBeenCalledWith(expect.stringContaining('/chatbot/token')) + expect(mockedCopy).not.toHaveBeenCalledWith(expect.stringContaining('ChatBot URL:')) + }) + }) }) diff --git a/web/app/components/app/overview/embedded/index.tsx b/web/app/components/app/overview/embedded/index.tsx index 12203178f1..112848760b 100644 --- a/web/app/components/app/overview/embedded/index.tsx +++ b/web/app/components/app/overview/embedded/index.tsx @@ -1,88 +1,46 @@ +import type { MutableRefObject } from 'react' +import type { WorkflowHiddenStartVariable, WorkflowLaunchInputValue } from '../app-card-utils' import type { SiteInfo } from '@/models/share' import { cn } from '@langgenius/dify-ui/cn' import { Dialog, DialogCloseButton, DialogContent, DialogTitle } from '@langgenius/dify-ui/dialog' import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' +import { + RiArrowDownSLine, + RiArrowRightSLine, +} from '@remixicon/react' import copy from 'copy-to-clipboard' -import * as React from 'react' -import { useState } from 'react' +import { Suspense, use, useEffect, useMemo, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import ActionButton from '@/app/components/base/action-button' import { useThemeContext } from '@/app/components/base/chat/embedded-chatbot/theme/theme-context' -import { IS_CE_EDITION } from '@/config' +import { InputVarType } from '@/app/components/workflow/types' import { useAppContext } from '@/context/app-context' import { basePath } from '@/utils/var' +import { + compressAndEncodeBase64, + createWorkflowLaunchInitialValues, + getChromePluginContent, + getEmbeddedIframeSnippet, + getEmbeddedScriptSnippet, + isWorkflowLaunchInputSupported, +} from '../app-card-utils' +import WorkflowHiddenInputFields from '../workflow-hidden-input-fields' import style from './style.module.css' type Props = { siteInfo?: SiteInfo isShow: boolean onClose: () => void 
- accessToken: string - appBaseUrl: string + accessToken?: string + appBaseUrl?: string + hiddenInputs?: WorkflowHiddenStartVariable[] className?: string } -const OPTION_MAP = { - iframe: { - getContent: (url: string, token: string) => - ``, - }, - scripts: { - getContent: (url: string, token: string, primaryColor: string, isTestEnv?: boolean) => - ` - -`, - }, - chromePlugin: { - getContent: (url: string, token: string) => `ChatBot URL: ${url}${basePath}/chatbot/${token}`, - }, -} +const OPTION_KEYS = ['iframe', 'scripts', 'chromePlugin'] as const const prefixEmbedded = 'overview.appInfo.embedded' -type Option = keyof typeof OPTION_MAP - -const OPTIONS: Option[] = ['iframe', 'scripts', 'chromePlugin'] +type Option = typeof OPTION_KEYS[number] const optionIconClassName: Record = { iframe: style.iframeIcon!, @@ -90,38 +48,274 @@ const optionIconClassName: Record = { chromePlugin: style.chromePluginIcon!, } -const Embedded = ({ siteInfo, isShow, onClose, appBaseUrl, accessToken, className }: Props) => { +const getSerializedHiddenInputValue = ( + variable: WorkflowHiddenStartVariable, + values: Record, +) => { + const rawValue = values[variable.variable] + if (variable.type === InputVarType.checkbox) + return String(Boolean(rawValue)) + + return String(rawValue ?? 
'') +} + +const buildEmbeddedIframeUrl = async ({ + appBaseUrl, + accessToken, + variables, + values, +}: { + appBaseUrl: string + accessToken: string + variables: WorkflowHiddenStartVariable[] + values: Record +}) => { + const iframeUrl = new URL(`${appBaseUrl}${basePath}/chatbot/${accessToken}`, window.location.origin) + + await Promise.all(variables.map(async (variable) => { + iframeUrl.searchParams.set(variable.variable, await compressAndEncodeBase64(getSerializedHiddenInputValue(variable, values))) + })) + + return iframeUrl.toString() +} + +const AsyncEmbeddedOptionContent = ({ + option, + iframeUrlPromise, + latestResolvedIframeUrlRef, +}: { + option: Option + iframeUrlPromise: Promise + latestResolvedIframeUrlRef: MutableRefObject +}) => { + const iframeUrl = use(iframeUrlPromise) + latestResolvedIframeUrlRef.current = iframeUrl + + if (option === 'chromePlugin') + return getChromePluginContent(iframeUrl) + + return getEmbeddedIframeSnippet(iframeUrl) +} + +const EmbeddedContent = ({ + siteInfo, + appBaseUrl, + accessToken, + hiddenInputs, +}: Required> & Pick) => { const { t } = useTranslation() + const supportedHiddenInputs = useMemo( + () => (hiddenInputs ?? []).filter(isWorkflowLaunchInputSupported), + [hiddenInputs], + ) + const initialHiddenInputValues = useMemo( + () => createWorkflowLaunchInitialValues(supportedHiddenInputs), + [supportedHiddenInputs], + ) const [option, setOption] = useState