diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index 76fbd18f47..9c2c6e2ca9 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -116,6 +116,12 @@ jobs: if: github.event_name != 'merge_group' uses: ./.github/actions/setup-web + - name: Generate API docs + if: github.event_name != 'merge_group' && steps.api-changes.outputs.any_changed == 'true' + run: | + cd api + uv run dev/generate_swagger_markdown_docs.py --swagger-dir openapi --markdown-dir openapi/markdown + - name: ESLint autofix if: github.event_name != 'merge_group' && steps.web-changes.outputs.any_changed == 'true' run: | diff --git a/Makefile b/Makefile index d8c9df5208..ae7589bbd6 100644 --- a/Makefile +++ b/Makefile @@ -71,13 +71,13 @@ type-check: @echo "📝 Running type checks (basedpyright + pyrefly + mypy)..." @./dev/basedpyright-check $(PATH_TO_CHECK) @./dev/pyrefly-check-local - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --check-untyped-defs --disable-error-code=import-untyped . @echo "✅ Type checks complete" type-check-core: @echo "📝 Running core type checks (basedpyright + mypy)..." @./dev/basedpyright-check $(PATH_TO_CHECK) - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --exclude 'dev/generate_fastopenapi_specs.py' --check-untyped-defs --disable-error-code=import-untyped . 
@echo "✅ Core type checks complete" test: diff --git a/api/controllers/common/schema.py b/api/controllers/common/schema.py index 8d112c203b..0c5e23c29c 100644 --- a/api/controllers/common/schema.py +++ b/api/controllers/common/schema.py @@ -1,4 +1,10 @@ -"""Helpers for registering Pydantic models with Flask-RESTX namespaces.""" +"""Helpers for registering Pydantic models with Flask-RESTX namespaces. + +Flask-RESTX treats `SchemaModel` bodies as opaque JSON schemas; it does not +promote Pydantic's nested `$defs` into top-level Swagger `definitions`. +These helpers keep that translation centralized so models registered through +`register_schema_models` emit resolvable Swagger 2.0 references. +""" from enum import StrEnum @@ -8,10 +14,32 @@ from pydantic import BaseModel, TypeAdapter DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" -def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: - """Register a single BaseModel with a namespace for Swagger documentation.""" +def _register_json_schema(namespace: Namespace, name: str, schema: dict) -> None: + """Register a JSON schema and promote any nested Pydantic `$defs`.""" - namespace.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) + nested_definitions = schema.get("$defs") + schema_to_register = dict(schema) + if isinstance(nested_definitions, dict): + schema_to_register.pop("$defs") + + namespace.schema_model(name, schema_to_register) + + if not isinstance(nested_definitions, dict): + return + + for nested_name, nested_schema in nested_definitions.items(): + if isinstance(nested_schema, dict): + _register_json_schema(namespace, nested_name, nested_schema) + + +def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: + """Register a BaseModel and its nested schema definitions for Swagger documentation.""" + + _register_json_schema( + namespace, + model.__name__, + 
model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), + ) def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None: @@ -34,8 +62,10 @@ def get_or_create_model(model_name: str, field_def): def register_enum_models(namespace: Namespace, *models: type[StrEnum]) -> None: """Register multiple StrEnum with a namespace.""" for model in models: - namespace.schema_model( - model.__name__, TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + _register_json_schema( + namespace, + model.__name__, + TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), ) diff --git a/api/controllers/console/admin.py b/api/controllers/console/admin.py index dce394be97..a32c3420bb 100644 --- a/api/controllers/console/admin.py +++ b/api/controllers/console/admin.py @@ -12,6 +12,7 @@ from werkzeug.exceptions import BadRequest, NotFound, Unauthorized from configs import dify_config from constants.languages import supported_language +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.wraps import only_edition_cloud from core.db.session_factory import session_factory @@ -301,15 +302,7 @@ class BatchAddNotificationAccountsPayload(BaseModel): user_email: list[str] = Field(..., description="List of account email addresses") -console_ns.schema_model( - UpsertNotificationPayload.__name__, - UpsertNotificationPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) - -console_ns.schema_model( - BatchAddNotificationAccountsPayload.__name__, - BatchAddNotificationAccountsPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_schema_models(console_ns, UpsertNotificationPayload, BatchAddNotificationAccountsPayload) @console_ns.route("/admin/upsert_notification") diff --git a/api/controllers/console/app/app_import.py b/api/controllers/console/app/app_import.py index e91dc9cfe5..b653016319 
100644 --- a/api/controllers/console/app/app_import.py +++ b/api/controllers/console/app/app_import.py @@ -2,7 +2,7 @@ from flask_restx import Resource from pydantic import BaseModel, Field from sqlalchemy.orm import Session -from controllers.common.schema import register_schema_models +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console.app.wraps import get_app_model from controllers.console.wraps import ( account_initialization_required, @@ -33,6 +33,7 @@ class AppImportPayload(BaseModel): app_id: str | None = Field(None) +register_enum_models(console_ns, ImportStatus) register_schema_models(console_ns, AppImportPayload, Import, CheckDependenciesResult) diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py index c720a5e074..d4f501d34c 100644 --- a/api/controllers/console/app/generator.py +++ b/api/controllers/console/app/generator.py @@ -3,6 +3,7 @@ from collections.abc import Sequence from flask_restx import Resource from pydantic import BaseModel, Field +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console import console_ns from controllers.console.app.error import ( CompletionRequestError, @@ -19,13 +20,12 @@ from core.helper.code_executor.python3.python3_code_provider import Python3CodeP from core.llm_generator.entities import RuleCodeGeneratePayload, RuleGeneratePayload, RuleStructuredOutputPayload from core.llm_generator.llm_generator import LLMGenerator from extensions.ext_database import db +from graphon.model_runtime.entities.llm_entities import LLMMode from graphon.model_runtime.errors.invoke import InvokeError from libs.login import current_account_with_tenant, login_required from models import App from services.workflow_service import WorkflowService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class InstructionGeneratePayload(BaseModel): flow_id: str = Field(..., 
description="Workflow/Flow ID") @@ -41,16 +41,16 @@ class InstructionTemplatePayload(BaseModel): type: str = Field(..., description="Instruction template type") -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(RuleGeneratePayload) -reg(RuleCodeGeneratePayload) -reg(RuleStructuredOutputPayload) -reg(InstructionGeneratePayload) -reg(InstructionTemplatePayload) -reg(ModelConfig) +register_enum_models(console_ns, LLMMode) +register_schema_models( + console_ns, + RuleGeneratePayload, + RuleCodeGeneratePayload, + RuleStructuredOutputPayload, + InstructionGeneratePayload, + InstructionTemplatePayload, + ModelConfig, +) @console_ns.route("/rule-generate") diff --git a/api/dev/generate_fastopenapi_specs.py b/api/dev/generate_fastopenapi_specs.py new file mode 100644 index 0000000000..5a94d32b93 --- /dev/null +++ b/api/dev/generate_fastopenapi_specs.py @@ -0,0 +1,95 @@ +"""Generate FastOpenAPI OpenAPI 3.0 specs without booting the full backend.""" + +from __future__ import annotations + +import argparse +import json +import logging +import sys +from dataclasses import dataclass +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_swagger_specs import apply_runtime_defaults, drop_null_values, sort_openapi_arrays + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class FastOpenApiSpecTarget: + route: str + filename: str + + +FASTOPENAPI_SPEC_TARGETS: tuple[FastOpenApiSpecTarget, ...] 
= ( + FastOpenApiSpecTarget(route="/fastopenapi/openapi.json", filename="fastopenapi-console-openapi.json"), +) + + +def create_fastopenapi_spec_app(): + """Build a minimal Flask app that only mounts FastOpenAPI docs routes.""" + + apply_runtime_defaults() + + from app_factory import create_flask_app_with_configs + from extensions import ext_fastopenapi + + app = create_flask_app_with_configs() + ext_fastopenapi.init_app(app) + return app + + +def generate_fastopenapi_specs(output_dir: Path) -> list[Path]: + """Write FastOpenAPI specs to `output_dir` and return the written paths.""" + + output_dir.mkdir(parents=True, exist_ok=True) + + app = create_fastopenapi_spec_app() + client = app.test_client() + + written_paths: list[Path] = [] + for target in FASTOPENAPI_SPEC_TARGETS: + response = client.get(target.route) + if response.status_code != 200: + raise RuntimeError(f"failed to fetch {target.route}: {response.status_code}") + + payload = response.get_json() + if not isinstance(payload, dict): + raise RuntimeError(f"unexpected response payload for {target.route}") + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) + + output_path = output_dir / target.filename + output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") + written_paths.append(output_path) + + return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "-o", + "--output-dir", + type=Path, + default=Path("openapi"), + help="Directory where the OpenAPI JSON files will be written.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_fastopenapi_specs(args.output_dir) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/api/dev/generate_swagger_markdown_docs.py b/api/dev/generate_swagger_markdown_docs.py new file mode 
100644 index 0000000000..0900d08331 --- /dev/null +++ b/api/dev/generate_swagger_markdown_docs.py @@ -0,0 +1,161 @@ +"""Generate OpenAPI JSON specs and split Markdown API docs. + +The Markdown step uses `swagger-markdown`, the same converter family as the +Swagger Markdown UI, so CI and local regeneration catch converter-incompatible +OpenAPI output early. +""" + +from __future__ import annotations + +import argparse +import logging +import subprocess +import sys +import tempfile +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_fastopenapi_specs import FASTOPENAPI_SPEC_TARGETS, generate_fastopenapi_specs +from dev.generate_swagger_specs import SPEC_TARGETS, generate_specs + +logger = logging.getLogger(__name__) + +SWAGGER_MARKDOWN_PACKAGE = "swagger-markdown@3.0.0" +CONSOLE_SWAGGER_FILENAME = "console-swagger.json" +STALE_COMBINED_MARKDOWN_FILENAME = "api-reference.md" + + +def _convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + subprocess.run( + [ + "npx", + "--yes", + SWAGGER_MARKDOWN_PACKAGE, + "-i", + str(spec_path), + "-o", + str(markdown_path), + ], + check=True, + ) + + +def _demote_markdown_headings(markdown: str, *, levels: int = 1) -> str: + """Nest generated Markdown under another Markdown section.""" + + heading_prefix = "#" * levels + lines = [] + for line in markdown.splitlines(): + if line.startswith("#"): + lines.append(f"{heading_prefix}{line}") + else: + lines.append(line) + return "\n".join(lines).strip() + + +def _append_fastopenapi_markdown(console_markdown_path: Path, fastopenapi_markdown_path: Path) -> None: + """Append FastOpenAPI console docs to the existing console API Markdown.""" + + console_markdown = console_markdown_path.read_text(encoding="utf-8").rstrip() + fastopenapi_markdown = _demote_markdown_headings( + fastopenapi_markdown_path.read_text(encoding="utf-8"), + levels=2, + ) + 
console_markdown_path.write_text( + "\n\n".join( + [ + console_markdown, + "## FastOpenAPI Preview (OpenAPI 3.0)", + fastopenapi_markdown, + ] + ) + + "\n", + encoding="utf-8", + ) + + +def generate_markdown_docs( + swagger_dir: Path, + markdown_dir: Path, + *, + keep_swagger_json: bool = False, +) -> list[Path]: + """Generate intermediate specs, convert them to split Markdown API docs, and return Markdown paths.""" + + swagger_paths = generate_specs(swagger_dir) + fastopenapi_paths = generate_fastopenapi_specs(swagger_dir) + spec_paths = [*swagger_paths, *fastopenapi_paths] + swagger_paths_by_name = {path.name: path for path in swagger_paths} + fastopenapi_paths_by_name = {path.name: path for path in fastopenapi_paths} + + markdown_dir.mkdir(parents=True, exist_ok=True) + + written_paths: list[Path] = [] + try: + with tempfile.TemporaryDirectory(prefix="dify-api-docs-") as temp_dir: + temp_markdown_dir = Path(temp_dir) + + for target in SPEC_TARGETS: + swagger_path = swagger_paths_by_name[target.filename] + markdown_path = markdown_dir / f"{swagger_path.stem}.md" + _convert_spec_to_markdown(swagger_path, markdown_path) + written_paths.append(markdown_path) + + for target in FASTOPENAPI_SPEC_TARGETS: # type: ignore + fastopenapi_path = fastopenapi_paths_by_name[target.filename] + markdown_path = temp_markdown_dir / f"{fastopenapi_path.stem}.md" + _convert_spec_to_markdown(fastopenapi_path, markdown_path) + + console_markdown_path = markdown_dir / f"{Path(CONSOLE_SWAGGER_FILENAME).stem}.md" + _append_fastopenapi_markdown(console_markdown_path, markdown_path) + + (markdown_dir / STALE_COMBINED_MARKDOWN_FILENAME).unlink(missing_ok=True) + finally: + if not keep_swagger_json: + for path in spec_paths: + path.unlink(missing_ok=True) + + return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--swagger-dir", + type=Path, + default=Path("openapi"), + help="Directory where 
intermediate JSON spec files will be written.", + ) + parser.add_argument( + "--markdown-dir", + type=Path, + default=Path("openapi/markdown"), + help="Directory where split Markdown API docs will be written.", + ) + parser.add_argument( + "--keep-swagger-json", + action="store_true", + help="Keep intermediate JSON spec files after Markdown generation.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_markdown_docs( + args.swagger_dir, + args.markdown_dir, + keep_swagger_json=args.keep_swagger_json, + ) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/api/dev/generate_swagger_specs.py b/api/dev/generate_swagger_specs.py index 7e9688bfb4..9122f3ab24 100644 --- a/api/dev/generate_swagger_specs.py +++ b/api/dev/generate_swagger_specs.py @@ -9,12 +9,15 @@ which is unnecessary when the goal is only to serialize the Flask-RESTX from __future__ import annotations import argparse +import hashlib import json import logging import os import sys +from collections.abc import MutableMapping from dataclasses import dataclass from pathlib import Path +from typing import Protocol, TypeGuard from flask import Flask from flask_restx.swagger import Swagger @@ -30,19 +33,110 @@ if str(API_ROOT) not in sys.path: class SpecTarget: route: str filename: str + namespace: str + + +class RestxApi(Protocol): + models: MutableMapping[str, object] + + def model(self, name: str, model: dict[object, object]) -> object: ... SPEC_TARGETS: tuple[SpecTarget, ...] 
= ( - SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json"), - SpecTarget(route="/api/swagger.json", filename="web-swagger.json"), - SpecTarget(route="/v1/swagger.json", filename="service-swagger.json"), + SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json", namespace="console"), + SpecTarget(route="/api/swagger.json", filename="web-swagger.json", namespace="web"), + SpecTarget(route="/v1/swagger.json", filename="service-swagger.json", namespace="service"), ) _ORIGINAL_REGISTER_MODEL = Swagger.register_model _ORIGINAL_REGISTER_FIELD = Swagger.register_field -def _apply_runtime_defaults() -> None: +def _is_inline_field_map(value: object) -> TypeGuard[dict[object, object]]: + """Return whether a nested field map is an anonymous inline mapping.""" + + from flask_restx.model import Model, OrderedModel + + return isinstance(value, dict) and not isinstance(value, (Model, OrderedModel)) + + +def _jsonable_schema_value(value: object) -> object: + """Return a deterministic JSON-serializable representation for schema fingerprints.""" + + if value is None or isinstance(value, str | int | float | bool): + return value + if isinstance(value, list | tuple): + return [_jsonable_schema_value(item) for item in value] + if isinstance(value, dict): + return {str(key): _jsonable_schema_value(item) for key, item in value.items()} + value_type = type(value) + return f"<{value_type.__module__}.{value_type.__qualname__}>" + + +def _field_signature(field: object) -> object: + """Build a stable signature for a Flask-RESTX field object.""" + + from flask_restx import fields + from flask_restx.model import instance + + field_instance = instance(field) + signature: dict[str, object] = { + "class": f"{field_instance.__class__.__module__}.{field_instance.__class__.__qualname__}" + } + + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + signature["nested"] = 
_inline_model_signature(nested) + else: + signature["nested"] = getattr( + nested, + "name", + f"<{type(nested).__module__}.{type(nested).__qualname__}>", + ) + elif hasattr(field_instance, "container"): + signature["container"] = _field_signature(field_instance.container) + else: + schema = getattr(field_instance, "__schema__", None) + if isinstance(schema, dict): + signature["schema"] = _jsonable_schema_value(schema) + + for attr_name in ( + "attribute", + "default", + "description", + "example", + "max", + "min", + "nullable", + "readonly", + "required", + "title", + ): + if hasattr(field_instance, attr_name): + signature[attr_name] = _jsonable_schema_value(getattr(field_instance, attr_name)) + + return signature + + +def _inline_model_signature(nested_fields: dict[object, object]) -> object: + """Build a stable signature for an anonymous inline model.""" + + return [ + (str(field_name), _field_signature(field)) + for field_name, field in sorted(nested_fields.items(), key=lambda item: str(item[0])) + ] + + +def _inline_model_name(nested_fields: dict[object, object]) -> str: + """Return a stable Swagger model name for an anonymous inline field map.""" + + signature = json.dumps(_inline_model_signature(nested_fields), sort_keys=True, separators=(",", ":")) + digest = hashlib.sha1(signature.encode("utf-8")).hexdigest()[:12] + return f"_AnonymousInlineModel_{digest}" + + +def apply_runtime_defaults() -> None: """Force the small config surface required for Swagger generation.""" os.environ.setdefault("SECRET_KEY", "spec-export") @@ -74,25 +168,26 @@ def _patch_swagger_for_inline_nested_dicts() -> None: anonymous_models = getattr(self, "_anonymous_inline_models", None) if anonymous_models is None: anonymous_models = {} - self._anonymous_inline_models = anonymous_models + self.__dict__["_anonymous_inline_models"] = anonymous_models anonymous_name = anonymous_models.get(id(nested_fields)) if anonymous_name is None: - anonymous_name = 
f"_AnonymousInlineModel{len(anonymous_models) + 1}" + anonymous_name = _inline_model_name(nested_fields) anonymous_models[id(nested_fields)] = anonymous_name - self.api.model(anonymous_name, nested_fields) + if anonymous_name not in self.api.models: + self.api.model(anonymous_name, nested_fields) return self.api.models[anonymous_name] def register_model_with_inline_dict_support(self: Swagger, model: object) -> dict[str, str]: - if isinstance(model, dict): + if _is_inline_field_map(model): model = get_or_create_inline_model(self, model) return _ORIGINAL_REGISTER_MODEL(self, model) def register_field_with_inline_dict_support(self: Swagger, field: object) -> None: nested = getattr(field, "nested", None) - if isinstance(nested, dict): + if _is_inline_field_map(nested): field.model = get_or_create_inline_model(self, nested) # type: ignore _ORIGINAL_REGISTER_FIELD(self, field) @@ -105,22 +200,169 @@ def _patch_swagger_for_inline_nested_dicts() -> None: def create_spec_app() -> Flask: """Build a minimal Flask app that only mounts the Swagger-producing blueprints.""" - _apply_runtime_defaults() + apply_runtime_defaults() _patch_swagger_for_inline_nested_dicts() app = Flask(__name__) from controllers.console import bp as console_bp + from controllers.console import console_ns from controllers.service_api import bp as service_api_bp + from controllers.service_api import service_api_ns from controllers.web import bp as web_bp + from controllers.web import web_ns app.register_blueprint(console_bp) app.register_blueprint(web_bp) app.register_blueprint(service_api_bp) + for namespace in (console_ns, web_ns, service_api_ns): + for api in namespace.apis: + _materialize_inline_model_definitions(api) + return app +def _registered_models(namespace: str) -> dict[str, object]: + """Return the Flask-RESTX models registered for a Swagger namespace.""" + + if namespace == "console": + from controllers.console import console_ns + + models = dict(console_ns.models) + for api in 
console_ns.apis: + models.update(api.models) + return models + if namespace == "web": + from controllers.web import web_ns + + models = dict(web_ns.models) + for api in web_ns.apis: + models.update(api.models) + return models + if namespace == "service": + from controllers.service_api import service_api_ns + + models = dict(service_api_ns.models) + for api in service_api_ns.apis: + models.update(api.models) + return models + + raise ValueError(f"unknown Swagger namespace: {namespace}") + + +def _materialize_inline_model_definitions(api: RestxApi) -> None: + """Convert inline `fields.Nested({...})` maps into named API models.""" + + from flask_restx import fields + from flask_restx.model import Model, OrderedModel, instance + + inline_models: dict[int, dict[object, object]] = {} + inline_model_names: dict[int, str] = {} + + def collect_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested) and id(nested) not in inline_models: + inline_models[id(nested)] = nested + for nested_field in nested.values(): + collect_field(nested_field) + + container = getattr(field_instance, "container", None) + if container is not None: + collect_field(container) + + for model in list(api.models.values()): + if isinstance(model, (Model, OrderedModel)): + for field in model.values(): + collect_field(field) + + for nested_fields in sorted(inline_models.values(), key=_inline_model_name): + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + + def model_name_for(nested_fields: dict[object, object]) -> str: + anonymous_name = inline_model_names.get(id(nested_fields)) + if anonymous_name is None: + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if 
anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + return anonymous_name + + def materialize_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + field_instance.model = api.models[model_name_for(nested)] # type: ignore[attr-defined] + + container = getattr(field_instance, "container", None) + if container is not None: + materialize_field(container) + + index = 0 + while index < len(api.models): + model = list(api.models.values())[index] + index += 1 + if isinstance(model, (Model, OrderedModel)): + for field in model.values(): + materialize_field(field) + + +def drop_null_values(value: object) -> object: + """Remove JSON null values that make the Markdown converter crash.""" + + if isinstance(value, dict): + return {key: drop_null_values(item) for key, item in value.items() if item is not None} + if isinstance(value, list): + return [drop_null_values(item) for item in value] + return value + + +def sort_openapi_arrays(value: object, *, parent_key: str | None = None) -> object: + """Sort order-insensitive Swagger arrays so generated Markdown is stable.""" + + if isinstance(value, dict): + return {key: sort_openapi_arrays(item, parent_key=key) for key, item in value.items()} + if not isinstance(value, list): + return value + + sorted_items = [sort_openapi_arrays(item, parent_key=parent_key) for item in value] + if parent_key == "parameters": + return sorted( + sorted_items, + key=lambda item: ( + item.get("in", "") if isinstance(item, dict) else "", + item.get("name", "") if isinstance(item, dict) else "", + json.dumps(item, sort_keys=True, default=str), + ), + ) + if parent_key in {"enum", "required", "schemes", "tags"}: + string_items = [item for item in sorted_items if isinstance(item, str)] + if len(string_items) == len(sorted_items): + return sorted(string_items) + return sorted_items + + 
+def _merge_registered_definitions(payload: dict[str, object], namespace: str) -> dict[str, object]: + """Include registered but route-indirect models in the exported Swagger definitions.""" + + definitions = payload.setdefault("definitions", {}) + if not isinstance(definitions, dict): + raise RuntimeError("unexpected Swagger definitions payload") + + for name, model in _registered_models(namespace).items(): + schema = getattr(model, "__schema__", None) + if isinstance(schema, dict): + definitions.setdefault(name, schema) + + return payload + + def generate_specs(output_dir: Path) -> list[Path]: """Write all Swagger specs to `output_dir` and return the written paths.""" @@ -138,6 +380,9 @@ def generate_specs(output_dir: Path) -> list[Path]: payload = response.get_json() if not isinstance(payload, dict): raise RuntimeError(f"unexpected response payload for {target.route}") + payload = _merge_registered_definitions(payload, target.namespace) + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) output_path = output_dir / target.filename output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") diff --git a/api/openapi/markdown/console-swagger.md b/api/openapi/markdown/console-swagger.md new file mode 100644 index 0000000000..a69cecd83c --- /dev/null +++ b/api/openapi/markdown/console-swagger.md @@ -0,0 +1,14766 @@ +# Console API +Console management APIs for app configuration, monitoring, and administration + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## console +Console management API operations + +### /account/avatar + +#### GET +##### Description + +Get account avatar url + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[AccountAvatarQuery](#accountavatarquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountAvatarPayload](#accountavatarpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailSendPayload](#changeemailsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/check-email-unique + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CheckEmailUniquePayload](#checkemailuniquepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/reset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailResetPayload](#changeemailresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailValidityPayload](#changeemailvaliditypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete + +#### POST +##### Parameters 
+ +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletePayload](#accountdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete/feedback + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletionFeedbackPayload](#accountdeletionfeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete/verify + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationStatusResponse](#educationstatusresponse) | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationActivatePayload](#educationactivatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education/autocomplete + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationAutocompleteQuery](#educationautocompletequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationAutocompleteResponse](#educationautocompleteresponse) | + +### /account/education/verify + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationVerifyResponse](#educationverifyresponse) | + +### /account/init + +#### POST +##### Parameters + +| Name | 
Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInitPayload](#accountinitpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/integrates + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountIntegrateListResponse](#accountintegratelistresponse) | + +### /account/interface-language + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInterfaceLanguagePayload](#accountinterfacelanguagepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/interface-theme + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInterfaceThemePayload](#accountinterfacethemepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountNamePayload](#accountnamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountPasswordPayload](#accountpasswordpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | 
Success | [Account](#account) | + +### /account/profile + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/timezone + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountTimezonePayload](#accounttimezonepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /activate + +#### POST +##### Description + +Activate account with invitation token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivatePayload](#activatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Account activated successfully | [ActivationResponse](#activationresponse) | +| 400 | Already activated or invalid token | | + +### /activate/check + +#### GET +##### Description + +Check if activation token is valid + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivateCheckQuery](#activatecheckquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ActivationCheckResponse](#activationcheckresponse) | + +### /admin/batch_add_notification_accounts + +#### POST +##### Description + +Register target accounts for a notification by email address. JSON body: {"notification_id": "...", "user_email": ["a@example.com", ...]}. File upload: multipart/form-data with a 'file' field (CSV or TXT, one email per line) plus a 'notification_id' field. Emails that do not match any account are silently skipped. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Accounts added successfully | + +### /admin/delete-explore-banner/{banner_id} + +#### DELETE +##### Description + +Delete an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| banner_id | path | Banner ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Banner deleted successfully | + +### /admin/insert-explore-apps + +#### POST +##### Description + +Insert or update an app in the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreAppPayload](#insertexploreapppayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | App updated successfully | +| 201 | App inserted successfully | +| 404 | App not found | + +### /admin/insert-explore-apps/{app_id} + +#### DELETE +##### Description + +Remove an app from the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID to remove | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App removed successfully | + +### /admin/insert-explore-banner + +#### POST +##### Description + +Insert an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreBannerPayload](#insertexplorebannerpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Banner inserted successfully | + +### /admin/upsert_notification + +#### POST +##### Description + +Create or update an in-product notification. 
Supply notification_id to update an existing one; omit it to create a new one. Pass at least one language variant in contents (zh / en / jp). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpsertNotificationPayload](#upsertnotificationpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Notification upserted successfully | + +### /all-workspaces + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceListQuery](#workspacelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-based-extension + +#### GET +##### Description + +Get all API-based extensions for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionListResponse](#apibasedextensionlistresponse) | + +#### POST +##### Description + +Create a new API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Extension created successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-based-extension/{id} + +#### DELETE +##### Description + +Delete API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Extension deleted successfully | + +#### GET +##### Description + +Get 
API-based extension by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +#### POST +##### Description + +Update API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Extension updated successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-key-auth/data-source + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/binding + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiKeyAuthBindingPayload](#apikeyauthbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/{binding_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/prompt-templates + +#### GET +##### Description + +Get advanced prompt templates based on app mode and model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[AdvancedPromptTemplateQuery](#advancedprompttemplatequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Prompt templates retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps + +#### GET +##### Summary + +Get app list + +##### Description + +Get list of applications with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppListQuery](#applistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppPagination](#apppagination) | + +#### POST +##### Summary + +Create app + +##### Description + +Create a new application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAppPayload](#createapppayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App created successfully | [AppDetail](#appdetail) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppImportPayload](#appimportpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import completed | [Import](#import) | +| 202 | Import pending confirmation | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/imports/{app_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | 
Schema | +| ---- | ----------- | ------ | +| 200 | Dependencies checked | [CheckDependenciesResult](#checkdependenciesresult) | + +### /apps/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import confirmed | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/workflows/online-users + +#### POST +##### Description + +Get workflow online users + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowOnlineUsersPayload](#workflowonlineuserspayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id} + +#### DELETE +##### Summary + +Delete app + +##### Description + +Delete application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App deleted successfully | +| 403 | Insufficient permissions | + +#### GET +##### Summary + +Get app detail + +##### Description + +Get application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppDetailWithSite](#appdetailwithsite) | + +#### PUT +##### Summary + +Update app + +##### Description + +Update application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAppPayload](#updateapppayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App updated successfully | [AppDetailWithSite](#appdetailwithsite) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/advanced-chat/workflow-runs + +#### GET +##### Summary + +Get advanced chat app workflow run list + +##### Description + +Get advanced chat workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunListQuery](#workflowrunlistquery) | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [AdvancedChatWorkflowRunPagination](#advancedchatworkflowrunpagination) | + +### /apps/{app_id}/advanced-chat/workflow-runs/count + +#### GET +##### Summary + +Get advanced chat workflow runs count statistics + +##### Description + +Get advanced chat workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunCountQuery](#workflowruncountquery) | +| app_id | path | Application ID | Yes | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCount](#workflowruncount) | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST +##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application 
ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow for advanced chat application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AdvancedChatWorkflowRunPayload](#advancedchatworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow run started successfully | +| 400 | Invalid request parameters | +| 403 | Permission denied | + +### /apps/{app_id}/agent/logs + +#### GET +##### Summary + +Get agent logs + +##### Description + +Get agent execution logs for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AgentLogQuery](#agentlogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | 
Description | Schema | +| ---- | ----------- | ------ | +| 200 | Agent logs retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps/{app_id}/annotation-reply/{action} + +#### POST +##### Description + +Enable or disable annotation reply for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyPayload](#annotationreplypayload) | +| action | path | Action to perform (enable/disable) | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-reply/{action}/status/{job_id} + +#### GET +##### Description + +Get status of annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-setting + +#### GET +##### Description + +Get annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotation settings retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-settings/{annotation_setting_id} + +#### POST +##### Description + +Update annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationSettingUpdatePayload](#annotationsettingupdatepayload) | +| annotation_setting_id | path | Annotation setting ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Settings updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get annotations for an app with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationListQuery](#annotationlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotations retrieved successfully | +| 403 | Insufficient permissions | + +#### POST +##### Description + +Create a new annotation for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAnnotationPayload](#createannotationpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/batch-import + +#### POST +##### Description + +Batch import annotations from CSV file with rate limiting and security checks + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Batch import started successfully | +| 400 | No file uploaded or too many files | +| 403 | Insufficient permissions | +| 413 | File too large | +| 429 | Too many requests or concurrent imports | + +### /apps/{app_id}/annotations/batch-import-status/{job_id} + +#### GET +##### Description + +Get status of batch import job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations/count + +#### GET +##### Description + +Get count of message annotations for the app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation count retrieved successfully | [AnnotationCountResponse](#annotationcountresponse) | + +### /apps/{app_id}/annotations/export + +#### GET +##### Description + +Export all annotations for an app with CSV injection protection + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations exported successfully | [AnnotationExportList](#annotationexportlist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id} + +#### DELETE 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | | Yes | string | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Description + +Update or delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAnnotationPayload](#updateannotationpayload) | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 204 | Annotation deleted successfully | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id}/hit-histories + +#### GET +##### Description + +Get hit histories for an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | +| limit | query | Page size | No | integer | +| page | query | Page number | No | integer | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit histories retrieved successfully | [AnnotationHitHistoryList](#annotationhithistorylist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/api-enable + +#### POST +##### Description + +Enable or disable app API + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppApiStatusPayload](#appapistatuspayload) | +| app_id | path | Application ID | Yes | string | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/audio-to-text + +#### POST +##### Description + +Transcribe audio to text for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Audio transcription successful | [AudioTranscriptResponse](#audiotranscriptresponse) | +| 400 | Bad request - No audio uploaded or unsupported type | | +| 413 | Audio file too large | | + +### /apps/{app_id}/chat-conversations + +#### GET +##### Description + +Get chat conversations with pagination, filtering and summary + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatConversationQuery](#chatconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationWithSummaryPagination](#conversationwithsummarypagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/chat-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a chat conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get chat conversation details
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationDetail](#conversationdetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages + +#### GET +##### Description + +Get chat messages for a conversation with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagesQuery](#chatmessagesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [MessageInfiniteScrollPaginationResponse](#messageinfinitescrollpaginationresponse) | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested questions for a message + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Suggested questions retrieved successfully | [SuggestedQuestionsResponse](#suggestedquestionsresponse) | +| 404 | Message or conversation not found | | + +### /apps/{app_id}/chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application 
ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/completion-conversations + +#### GET +##### Description + +Get completion conversations with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionConversationQuery](#completionconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationPagination](#conversationpagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/completion-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a completion conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get completion conversation details with messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationMessageDetail](#conversationmessagedetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/completion-messages + +#### POST +##### 
Description + +Generate completion message for debugging + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion generated successfully | +| 400 | Invalid request parameters | +| 404 | App not found | + +### /apps/{app_id}/completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/conversation-variables + +#### GET +##### Description + +Get conversation variables for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [PaginatedConversationVariableResponse](#paginatedconversationvariableresponse) | + +### /apps/{app_id}/convert-to-workflow + +#### POST +##### Summary + +Convert basic mode of chatbot app to workflow mode + +##### Description + +Convert application to workflow mode +Convert expert mode of chatbot app to workflow mode +Convert Completion App to Workflow App + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | 
----------- | -------- | ------ | +| payload | body | | Yes | [ConvertToWorkflowPayload](#converttoworkflowpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application converted to workflow successfully | +| 400 | Application cannot be converted | +| 403 | Permission denied | + +### /apps/{app_id}/copy + +#### POST +##### Summary + +Copy app + +##### Description + +Create a copy of an existing application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CopyAppPayload](#copyapppayload) | +| app_id | path | Application ID to copy | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App copied successfully | [AppDetailWithSite](#appdetailwithsite) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/export + +#### GET +##### Summary + +Export app + +##### Description + +Export application configuration as DSL + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppExportQuery](#appexportquery) | +| app_id | path | Application ID to export | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App exported successfully | [AppExportResponse](#appexportresponse) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/feedbacks + +#### POST +##### Description + +Create or update message feedback (like/dislike) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | 
----------- | +| 200 | Feedback updated successfully | +| 403 | Insufficient permissions | +| 404 | Message not found | + +### /apps/{app_id}/feedbacks/export + +#### GET +##### Description + +Export user feedback data for Google Sheets + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackExportQuery](#feedbackexportquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback data exported successfully | +| 400 | Invalid parameters | +| 500 | Internal server error | + +### /apps/{app_id}/icon + +#### POST +##### Description + +Update application icon + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppIconPayload](#appiconpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Icon updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/messages/{message_id} + +#### GET +##### Description + +Get message details by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Message retrieved successfully | [MessageDetailResponse](#messagedetailresponse) | +| 404 | Message not found | | + +### /apps/{app_id}/model-config + +#### POST +##### Summary + +Modify app model config + +##### Description + +Update application model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
payload | body | | Yes | [ModelConfigRequest](#modelconfigrequest) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Model configuration updated successfully | +| 400 | Invalid configuration | +| 404 | App not found | + +### /apps/{app_id}/name + +#### POST +##### Description + +Check if app name is available + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppNamePayload](#appnamepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Name availability checked | [AppDetail](#appdetail) | + +### /apps/{app_id}/publish-to-creators-platform + +#### POST +##### Summary + +Publish app to Creators Platform + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/server + +#### GET +##### Description + +Get MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration retrieved successfully | [AppMCPServerResponse](#appmcpserverresponse) | + +#### POST +##### Description + +Create MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerCreatePayload](#mcpservercreatepayload) | +| app_id | path | Application ID | 
Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | MCP server configuration created successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | + +#### PUT +##### Description + +Update MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerUpdatePayload](#mcpserverupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration updated successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /apps/{app_id}/site + +#### POST +##### Description + +Update application site configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteUpdatePayload](#appsiteupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site configuration updated successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions | | +| 404 | App not found | | + +### /apps/{app_id}/site-enable + +#### POST +##### Description + +Enable or disable app site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteStatusPayload](#appsitestatuspayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient 
permissions | | + +### /apps/{app_id}/site/access-token-reset + +#### POST +##### Description + +Reset access token for application site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Access token reset successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions (admin/owner required) | | +| 404 | App or site not found | | + +### /apps/{app_id}/statistics/average-response-time + +#### GET +##### Description + +Get average response time statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average response time statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/average-session-interactions + +#### GET +##### Description + +Get average session interaction statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average session interaction statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-conversations + +#### GET +##### Description + +Get daily conversation statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily conversation statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-end-users + +#### GET +##### Description + +Get daily terminal/end-user statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily terminal statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-messages + +#### GET +##### Description + +Get daily message statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily message statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/token-costs + +#### GET +##### Description + +Get daily token cost statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily token cost statistics 
retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/tokens-per-second + +#### GET +##### Description + +Get tokens per second statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tokens per second statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/user-satisfaction-rate + +#### GET +##### Description + +Get user satisfaction rate statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | User satisfaction rate statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/text-to-audio + +#### POST +##### Description + +Convert text to speech for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToSpeechPayload](#texttospeechpayload) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text to speech conversion successful | +| 400 | Bad request - Invalid parameters | + +### /apps/{app_id}/text-to-audio/voices + +#### GET +##### Description + +Get available TTS voices for a specific language + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | 
| Yes | [TextToSpeechVoiceQuery](#texttospeechvoicequery) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | TTS voices retrieved successfully | [ object ] | +| 400 | Invalid language parameter | | + +### /apps/{app_id}/trace + +#### GET +##### Summary + +Get app trace + +##### Description + +Get app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration retrieved successfully | + +#### POST +##### Description + +Update app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppTracePayload](#apptracepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/trace-config + +#### DELETE +##### Summary + +Delete an existing trace app configuration + +##### Description + +Delete an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tracing configuration deleted successfully | +| 400 | Invalid request parameters or configuration not found | + +#### GET +##### Description + +Get tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration retrieved successfully | object | +| 400 | Invalid request parameters | | + +#### PATCH +##### Summary + +Update an existing trace app configuration + +##### Description + +Update an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration updated successfully | object | +| 400 | Invalid request parameters or configuration not found | | + +#### POST +##### Summary + +Create a new trace app configuration + +##### Description + +Create a new tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Tracing configuration created successfully | object | +| 400 | Invalid request parameters or configuration already exists | | + +### /apps/{app_id}/trigger-enable + +#### POST +##### Summary + +Update app trigger (enable/disable) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ParserEnable](#parserenable) | + +##### Responses + +| 
Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerResponse](#workflowtriggerresponse) | + +### /apps/{app_id}/triggers + +#### GET +##### Summary + +Get app triggers list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerListResponse](#workflowtriggerlistresponse) | + +### /apps/{app_id}/workflow-app-logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow application execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow app logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | + +### /apps/{app_id}/workflow-archived-logs + +#### GET +##### Summary + +Get workflow archived logs + +##### Description + +Get workflow archived execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow archived logs retrieved successfully | [WorkflowArchivedLogPaginationResponse](#workflowarchivedlogpaginationresponse) | + +### /apps/{app_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Description + +Get workflow run list + +##### Parameters + +| 
Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunListQuery](#workflowrunlistquery) | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [WorkflowRunPagination](#workflowrunpagination) | + +### /apps/{app_id}/workflow-runs/count + +#### GET +##### Summary + +Get workflow runs count statistics + +##### Description + +Get workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunCountQuery](#workflowruncountquery) | +| app_id | path | Application ID | Yes | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCount](#workflowruncount) | + +### /apps/{app_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Description + +Stop running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 403 | Permission denied | +| 404 | Task not found | + +### /apps/{app_id}/workflow-runs/{run_id} + +#### GET +##### Summary + +Get workflow run detail + +##### Description + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run detail retrieved successfully | [WorkflowRunDetail](#workflowrundetail) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow-runs/{run_id}/export + +#### GET +##### Description + +Generate a download URL for an archived workflow run. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Export URL generated | [WorkflowRunExport](#workflowrunexport) | + +### /apps/{app_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Description + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node executions retrieved successfully | [WorkflowRunNodeExecutionList](#workflowrunnodeexecutionlist) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow/comments + +#### GET +##### Summary + +Get all comments for a workflow + +##### Description + +Get all comments for a workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comments retrieved successfully | [WorkflowCommentBasic](#workflowcommentbasic) | + +#### POST +##### Summary + +Create a new workflow comment + +##### Description + +Create a new workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentCreatePayload](#workflowcommentcreatepayload) | +| app_id | path | Application ID | Yes | string | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Comment created successfully | [WorkflowCommentCreate](#workflowcommentcreate) | + +### /apps/{app_id}/workflow/comments/mention-users + +#### GET +##### Summary + +Get all users in current tenant for mentions + +##### Description + +Get all users in current tenant for mentions + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Mentionable users retrieved successfully | [WorkflowCommentMentionUsersPayload](#workflowcommentmentionuserspayload) | + +### /apps/{app_id}/workflow/comments/{comment_id} + +#### DELETE +##### Summary + +Delete a workflow comment + +##### Description + +Delete a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Comment deleted successfully | + +#### GET +##### Summary + +Get a specific workflow comment + +##### Description + +Get a specific workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment retrieved successfully | [WorkflowCommentDetail](#workflowcommentdetail) | + +#### PUT +##### Summary + +Update a workflow comment + +##### Description + +Update a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentUpdatePayload](#workflowcommentupdatepayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment updated successfully | [WorkflowCommentUpdate](#workflowcommentupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies + +#### POST +##### Summary + +Add a reply to a workflow comment + +##### Description + +Add a reply to a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Reply created successfully | [WorkflowCommentReplyCreate](#workflowcommentreplycreate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies/{reply_id} + +#### DELETE +##### Summary + +Delete a comment reply + +##### Description + +Delete a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Reply deleted successfully | + +#### PUT +##### Summary + +Update a comment reply + +##### Description + +Update a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Reply updated successfully | [WorkflowCommentReplyUpdate](#workflowcommentreplyupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/resolve + +#### POST +##### Summary + +Resolve a workflow comment + +##### Description + +Resolve a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment resolved successfully | [WorkflowCommentResolve](#workflowcommentresolve) | + +### /apps/{app_id}/workflow/statistics/average-app-interactions + +#### GET +##### Description + +Get workflow average app interaction statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Average app interaction statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-conversations + +#### GET +##### Description + +Get workflow daily runs statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | 
----------- | +| 200 | Daily runs statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-terminals + +#### GET +##### Description + +Get workflow daily terminals statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily terminals statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/token-costs + +#### GET +##### Description + +Get workflow daily token cost statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily token cost statistics retrieved successfully | + +### /apps/{app_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Description + +Get all published workflows for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowListQuery](#workflowlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflows retrieved successfully | [WorkflowPagination](#workflowpagination) | + +### /apps/{app_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configurations for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- 
| ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configurations retrieved successfully | + +### /apps/{app_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configuration by type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DefaultBlockConfigQuery](#defaultblockconfigquery) | +| app_id | path | Application ID | Yes | string | +| block_type | path | Block type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configuration retrieved successfully | +| 404 | Block type not found | + +### /apps/{app_id}/workflows/draft + +#### GET +##### Summary + +Get draft workflow + +##### Description + +Get draft workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Draft workflow not found | | + +#### POST +##### Summary + +Sync draft workflow + +##### Description + +Sync draft workflow configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SyncDraftWorkflowPayload](#syncdraftworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow synced successfully | [SyncDraftWorkflowResponse](#syncdraftworkflowresponse) | 
+| 400 | Invalid workflow configuration | | +| 403 | Permission denied | | + +### /apps/{app_id}/workflows/draft/conversation-variables + +#### GET +##### Description + +Get conversation variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | +| 404 | Draft workflow not found | | + +#### POST +##### Description + +Update conversation variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation variables updated successfully | + +### /apps/{app_id}/workflows/draft/environment-variables + +#### GET +##### Summary + +Get environment variables + +##### Description + +Get environment variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables retrieved successfully | +| 404 | Draft workflow not found | + +#### POST +##### Description + +Update environment variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EnvironmentVariableUpdatePayload](#environmentvariableupdatepayload) | +| app_id | path | 
Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables updated successfully | + +### /apps/{app_id}/workflows/draft/features + +#### POST +##### Description + +Update draft workflow features + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowFeaturesPayload](#workflowfeaturespayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow features updated successfully | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/delivery-test + +#### POST +##### Summary + +Test human input delivery + +##### Description + +Test human input delivery for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputDeliveryTestPayload](#humaninputdeliverytestpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST 
+##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Description + +Get last run result for 
draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node last run retrieved successfully | [WorkflowRunNodeExecution](#workflowrunnodeexecution) | +| 403 | Permission denied | | +| 404 | Node last run not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Description + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowNodeRunPayload](#draftworkflownoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node run started successfully | [WorkflowRunNodeExecution](#workflowrunnodeexecution) | +| 403 | Permission denied | | +| 404 | Node not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/trigger/run + +#### POST +##### Summary + +Poll for trigger events and execute single node when event arrives + +##### Description + +Poll for trigger events and execute single node when event arrives + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trigger event received and node executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE 
+##### Description + +Delete all variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Node variables deleted successfully | + +#### GET +##### Description + +Get variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Draft workflow run started successfully | +| 403 | Permission denied | + +### /apps/{app_id}/workflows/draft/system-variables + +#### GET +##### Description + +Get system variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | System variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/trigger/run + +#### 
POST +##### Summary + +Poll for trigger events and execute full workflow when event arrives + +##### Description + +Poll for trigger events and execute full workflow when event arrives + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowTriggerRunRequest](#draftworkflowtriggerrunrequest) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trigger event received and workflow executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/trigger/run-all + +#### POST +##### Summary + +Full workflow debug when the start node is a trigger + +##### Description + +Full workflow debug when the start node is a trigger + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowTriggerRunAllPayload](#draftworkflowtriggerrunallpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/variables + +#### DELETE +##### Description + +Delete all draft workflow variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Workflow variables deleted successfully | + +#### GET +##### Summary + +Get draft workflow variables + +##### Description + +Get draft workflow variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
payload | body | | Yes | [WorkflowDraftVariableListQuery](#workflowdraftvariablelistquery) | +| app_id | path | Application ID | Yes | string | +| limit | query | Number of items per page (1-100) | No | string | +| page | query | Page number (1-100000) | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow variables retrieved successfully | [WorkflowDraftVariableListWithoutValue](#workflowdraftvariablelistwithoutvalue) | + +### /apps/{app_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Description + +Delete a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Variable deleted successfully | +| 404 | Variable not found | + +#### GET +##### Description + +Get a specific workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable retrieved successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +#### PATCH +##### Description + +Update a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowDraftVariableUpdatePayload](#workflowdraftvariableupdatepayload) | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | 
[WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Description + +Reset a workflow variable to its default value + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable reset successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 204 | Variable reset (no content) | | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/publish + +#### GET +##### Summary + +Get published workflow + +##### Description + +Get published workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Published workflow not found | | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PublishWorkflowPayload](#publishworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/triggers/webhook + +#### GET +##### Summary + +Get webhook trigger for a node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WebhookTriggerResponse](#webhooktriggerresponse) | + +### /apps/{app_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Description + +Update workflow by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowUpdatePayload](#workflowupdatepayload) | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Workflow ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow updated successfully | [Workflow](#workflow) | +| 403 | Permission denied | | +| 404 | Workflow not found | | + +### /apps/{app_id}/workflows/{workflow_id}/restore + +#### POST +##### Description + +Restore a published workflow version into the draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Published workflow ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow restored successfully | +| 400 | Source workflow must be published | +| 404 | Workflow not found | + +### /apps/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for an app + +##### Description + +Get all API keys for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for an app + +##### Description + +Create a new API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /apps/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for an app + +##### Description + +Delete an API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### /apps/{server_id}/server/refresh + +#### GET +##### Description + +Refresh MCP server configuration and regenerate server code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| server_id | path | Server ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server refreshed successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /auth/plugin/datasource/default-list + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/auth/plugin/datasource/list + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialPayload](#datasourcecredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCustomClientPayload](#datasourcecustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/default + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceDefaultPayload](#datasourcedefaultpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/delete + +#### POST 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialDeletePayload](#datasourcecredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialUpdatePayload](#datasourcecredentialupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update-name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceUpdateNamePayload](#datasourceupdatenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/invoices + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/partners/{partner_key}/tenants + +#### PUT +##### Description + +Sync partner tenants bindings + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PartnerTenantsPayload](#partnertenantspayload) | +| partner_key | path | Partner key | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tenants synced to partner successfully | +| 400 | Invalid partner information | + +### /billing/subscription + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /code-based-extension + +#### GET +##### Description + +Get code-based extension data by module name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| module | query | Extension module name | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [CodeBasedExtensionResponse](#codebasedextensionresponse) | + +### /compliance/download + +#### GET +##### Description + +Get compliance document download link + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ComplianceDownloadQuery](#compliancedownloadquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates/{binding_id}/{action} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets + +#### GET +##### Description + +Get list of datasets + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
ids | query | Filter by dataset IDs (list) | No | string | +| include_all | query | Include all datasets (default: false) | No | string | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| tag_ids | query | Filter by tag IDs (list) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | + +#### POST +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Dataset created successfully | +| 400 | Invalid request parameters | + +### /datasets/api-base-info + +#### GET +##### Description + +Get dataset API base information + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | API base info retrieved successfully | + +### /datasets/api-keys + +#### GET +##### Description + +Get dataset API keys + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/api-keys/{api_key_id} + +#### DELETE +##### Description + +Delete dataset API key + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### 
/datasets/batch_import_status/{job_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external + +#### POST +##### Description + +Create external knowledge dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalDatasetCreatePayload](#externaldatasetcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | External dataset created successfully | [DatasetDetail](#datasetdetail) | +| 400 | Invalid parameters | | +| 403 | Permission denied | | + +### /datasets/external-knowledge-api + +#### GET +##### Description + +Get external knowledge API templates + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API templates retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get external knowledge API template details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API template retrieved successfully | +| 404 | Template not found | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id}/use-check + +#### GET +##### Description + +Check if external knowledge API is being used + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Usage check completed successfully | + +### /datasets/indexing-estimate + +#### POST +##### Description + +Estimate dataset indexing cost + +##### Parameters + +| Name | Located in | Description | Required 
| Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IndexingEstimatePayload](#indexingestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | + +### /datasets/init + +#### POST +##### Description + +Initialize dataset with documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Dataset initialized successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | +| 400 | Invalid request parameters | | + +### /datasets/metadata/built-in + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/notion-indexing-estimate + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/process-rule + +#### GET +##### Description + +Get dataset document processing rules + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| document_id | query | Document ID (optional) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Process rules retrieved successfully | + +### /datasets/retrieval-setting + +#### GET +##### Description + +Get dataset retrieval settings + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Retrieval settings 
retrieved successfully | + +### /datasets/retrieval-setting/{vector_type} + +#### GET +##### Description + +Get mock dataset retrieval settings by vector type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| vector_type | path | Vector store type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Mock retrieval settings retrieved successfully | + +### /datasets/{dataset_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset retrieved successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +#### PATCH +##### Description + +Update dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset updated successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/api-keys/{status} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | 
path | | Yes | string | +| status | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/auto-disable-logs + +#### GET +##### Description + +Get dataset auto disable logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Auto disable logs retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/batch/{batch}/indexing-estimate + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/batch/{batch}/indexing-status + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| fetch | query | Fetch full details (default: false) | No | string | +| keyword | query | 
Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| sort | query | Sort order (default: -created_at) | No | string | +| status | query | Filter documents by display status | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Documents created successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Summary + +Stream a ZIP archive containing the requested uploaded documents + +##### Description + +Download selected dataset documents as a single ZIP archive (upload-file only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/generate-summary + +#### POST +##### Summary + +Generate summary index for specified documents + +##### Description + +Generate summary index for documents +This endpoint checks if the dataset configuration supports summary generation +(indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), +then asynchronously generates summary indexes for the provided documents. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [GenerateSummaryPayload](#generatesummarypayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary generation started successfully | +| 400 | Invalid request or dataset configuration | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataOperationData](#metadataoperationdata) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/status/{action}/batch + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get document details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| metadata | query | Metadata inclusion (all/only/without) 
| No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a dataset document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-estimate + +#### GET +##### Description + +Estimate document indexing cost + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | +| 400 | Document already finished | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-status + +#### GET +##### Description + +Get document indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/metadata + +#### PUT +##### Description + +Update document metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body 
| | Yes | [DocumentMetadataUpdatePayload](#documentmetadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document metadata updated successfully | +| 403 | Permission denied | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/pipeline-execution-log + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/pause + +#### PATCH +##### Summary + +Pause document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/resume + +#### PATCH +##### Summary + +Resume document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/datasets/{dataset_id}/documents/{document_id}/processing/{action} + +#### PATCH +##### Description + +Update document processing status (pause/resume) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform (pause/resume) | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Processing status updated successfully | +| 400 | Invalid action | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/rename + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRenamePayload](#documentrenamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Document renamed successfully | [DocumentResponse](#documentresponse) | + +### /datasets/{dataset_id}/documents/{document_id}/segment + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segment/{action} + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/batch_import + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path 
| | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/summary-status + +#### GET +##### Summary + +Get summary index generation status for a document + +##### Description + +Get summary index generation status for a document +Returns: +- total_segments: Total number of segments in the document +- summary_status: Dictionary with status counts + - completed: Number of summaries completed + - generating: Number of summaries being generated + - error: Number of summaries with errors + - not_started: Number of segments without summary records +- summaries: List of summary records with status and content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/website-sync + +#### GET +##### Summary + +Sync website document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/error-docs + +#### GET +##### Description + +Get dataset error documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Error documents retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/external-hit-testing + +#### POST +##### Description + +Test external knowledge retrieval for dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalHitTestingPayload](#externalhittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External hit testing completed successfully | +| 400 | Invalid parameters | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Description + +Test dataset knowledge retrieval + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit testing completed successfully | [HitTestingResponse](#hittestingresponse) | +| 400 | Invalid parameters | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/indexing-status + +#### GET +##### Description + +Get dataset indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/permission-part-users + +#### GET +##### Description + +Get dataset permission user list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Permission users retrieved successfully | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/queries + +#### GET +##### Description + +Get dataset query history + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Query history retrieved successfully | [DatasetQueryDetail](#datasetquerydetail) | + +### /datasets/{dataset_id}/related-apps + +#### GET +##### Description + +Get applications related to dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Related apps retrieved successfully | [RelatedAppList](#relatedapplist) | + +### /datasets/{dataset_id}/retry + +#### POST +##### Summary + +Retry document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRetryPayload](#documentretrypayload) | + +##### Responses + +| Code 
| Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/use-check + +#### GET +##### Description + +Check if dataset is in use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset use status retrieved successfully | + +### /datasets/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for a dataset + +##### Description + +Get all API keys for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for a dataset + +##### Description + +Create a new API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for a dataset + +##### Description + +Delete an API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + 
+### /email-code-login + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-code-login/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginPayload](#emailcodeloginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/send-email + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/validity + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /explore/apps + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RecommendedAppsQuery](#recommendedappsquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [RecommendedAppListResponse](#recommendedapplistresponse) | + +### /explore/apps/{app_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /features + +#### GET +##### Summary + +Get feature configuration for current tenant + +##### Description + +Get feature configuration for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | 
------ | +| 200 | Success | [FeatureResponse](#featureresponse) | + +### /files/support-type + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /files/upload + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [UploadConfig](#uploadconfig) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | + +### /files/{file_id}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| file_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Email sent successfully | [ForgotPasswordEmailResponse](#forgotpasswordemailresponse) | +| 400 | Invalid email or rate limit exceeded | | + +### /forgot-password/resets + +#### POST +##### Description + +Reset password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Password reset successfully | [ForgotPasswordResetResponse](#forgotpasswordresetresponse) | +| 400 | Invalid token or password mismatch | | + +### /forgot-password/validity + +#### POST +##### 
Description + +Verify password reset code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Code verified successfully | [ForgotPasswordCheckResponse](#forgotpasswordcheckresponse) | +| 400 | Invalid code or token | | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by form token + +##### Description + +GET /console/api/form/human_input/{form_token} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by form token + +##### Description + +POST /console/api/form/human_input/{form_token} + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /info + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /installed-apps + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [InstalledAppListResponse](#installedapplistresponse) | + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | 
Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionMessageExplorePayload](#completionmessageexplorepayload) | + +##### Responses 
+ +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/pin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | 
path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/unpin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/feedbacks + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/more-like-this + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MoreLikeThisQuery](#morelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | Description | 
Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/meta + +#### GET +##### Summary + +Get app meta + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageListQuery](#savedmessagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageCreatePayload](#savedmessagecreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages/{message_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /instruction-generate + +#### POST +##### Description + +Generate instruction for workflow nodes or general use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionGeneratePayload](#instructiongeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Instruction generated successfully | +| 400 | Invalid request parameters or flow/workflow not found | +| 402 | Provider quota 
exceeded | + +### /instruction-generate/template + +#### POST +##### Description + +Get instruction generation template + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionTemplatePayload](#instructiontemplatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Template retrieved successfully | +| 400 | Invalid request parameters | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /logout + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /mcp/oauth/callback + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notification + +#### GET +##### Description + +Return the active in-product notification for the current user in their interface language (falls back to English if unavailable). The notification is NOT marked as seen here; call POST /notification/dismiss when the user explicitly closes the modal. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success — inspect should_show to decide whether to render the modal | +| 401 | Unauthorized | + +### /notification/dismiss + +#### POST +##### Description + +Mark a notification as dismissed for the current user. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 401 | Unauthorized | + +### /notion/pages/{page_id}/{page_type}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notion/pre-import/pages + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/authorize/{provider} + +#### GET +##### Description + +Handle OAuth callback and complete login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| state | query | Optional state parameter (used for invite token) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with access token | +| 400 | OAuth process failed | + +### /oauth/data-source/binding/{provider} + +#### GET +##### Description + +Bind OAuth data source with authorization code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code 
from OAuth provider | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source binding success | [OAuthDataSourceBindingResponse](#oauthdatasourcebindingresponse) | +| 400 | Invalid provider or code | | + +### /oauth/data-source/callback/{provider} + +#### GET +##### Description + +Handle OAuth callback from data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| error | query | Error message from OAuth provider | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with result | +| 400 | Invalid provider | + +### /oauth/data-source/{provider} + +#### GET +##### Description + +Get OAuth authorization URL for data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Authorization URL or internal setup success | [OAuthDataSourceResponse](#oauthdatasourceresponse) | +| 400 | Invalid provider | | +| 403 | Admin privileges required | | + +### /oauth/data-source/{provider}/{binding_id}/sync + +#### GET +##### Description + +Sync data from OAuth data source + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | Data source binding ID | Yes | string | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source sync success 
| [OAuthDataSourceSyncResponse](#oauthdatasourcesyncresponse) | +| 400 | Invalid provider or sync failed | | + +### /oauth/login/{provider} + +#### GET +##### Description + +Initiate OAuth login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| invite_token | query | Optional invitation token | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to OAuth authorization URL | +| 400 | Invalid provider | + +### /oauth/plugin/{provider_id}/datasource/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider_id}/datasource/get-authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/trigger/callback + +#### 
GET +##### Summary + +Handle OAuth callback for trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/account + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/authorize + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/customized/templates/{template_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/dataset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineDatasetImportPayload](#ragpipelinedatasetimportpayload) | 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/empty-dataset + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates/{template_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/datasource-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineImportPayload](#ragpipelineimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{pipeline_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/recommended-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/rag/pipelines/transform/datasets/{dataset_id} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/customized/publish + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Payload](#payload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/exports + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id} + +#### GET +##### Summary + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| block_type | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft + +#### GET +##### Summary + +Get draft rag pipeline's 
workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Sync draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/variables-inspect + +#### POST +##### Summary + +Set datasource variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceVariablesPayload](#datasourcevariablespayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/environment-variables + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/rag/pipelines/{pipeline_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunRequiredPayload](#noderunrequiredpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/system-variables + +#### GET +##### Parameters + +| 
Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/publish + +#### GET +##### Summary + +Get published pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/preview + +#### POST +##### Summary + +Run datasource content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/run + +#### POST +##### Summary + +Run published workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [PublishedWorkflowRunPayload](#publishedworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete a published workflow version that is not currently active on the pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id}/restore + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /refresh-token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/{url} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /reset-password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rule-code-generate + +#### POST +##### Description + +Generate code rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleCodeGeneratePayload](#rulecodegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Code rules generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-generate + +#### POST +##### Description + +Generate rule configuration using 
LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleGeneratePayload](#rulegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Rule configuration generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-structured-output-generate + +#### POST +##### Description + +Generate structured output rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleStructuredOutputPayload](#rulestructuredoutputpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Structured output generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /spec/schema-definitions + +#### GET +##### Summary + +Get system JSON Schema definitions specification + +##### Description + +Used for frontend component type mapping + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /system-features + +#### GET +##### Summary + +Get system-wide feature configuration + +##### Description + +Get system-wide feature configuration +NOTE: This endpoint is unauthenticated by design, as it provides system features +data required for dashboard initialization. + +Authentication would create circular dependency (can't login without dashboard loading). + +Only non-sensitive configuration data should be returned by this endpoint. 
+ +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [SystemFeatureResponse](#systemfeatureresponse) | + +### /tag-bindings + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tag-bindings/remove + +#### POST +##### Description + +Remove one or more tag bindings from a target. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingRemovePayload](#tagbindingremovepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword for tag name. | No | string | +| type | query | Tag type filter. Can be "knowledge" or "app". 
| No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ [TagResponse](#tagresponse) ] | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags/{tag_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /test/retrieval + +#### POST +##### Description + +Bedrock retrieval test (internal use only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [BedrockRetrievalPayload](#bedrockretrievalpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Bedrock retrieval test completed | + +### /trial-apps/{app_id} + +#### GET +##### Summary + +Get app detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ChatRequest](#chatrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionRequest](#completionrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/datasets + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/trial-apps/{app_id}/site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Returns the site configuration for the application including theme, icons, and text. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [TextToSpeechRequest](#texttospeechrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows + +#### GET +##### Summary + +Get workflow detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunRequest](#workflowrunrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /website/crawl + +#### POST +##### Description + +Crawl website content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlPayload](#websitecrawlpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Website crawl initiated successfully | +| 400 | Invalid crawl parameters | + +### /website/crawl/status/{job_id} + +#### GET +##### Description + +Get website crawl status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlStatusQuery](#websitecrawlstatusquery) | +| job_id | path | Crawl job ID | Yes | string | +| provider | query | Crawl provider (firecrawl/watercrawl/jinareader) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Crawl status retrieved successfully | +| 400 | Invalid provider | +| 404 | Crawl job not found | + +### /workflow/{workflow_run_id}/events + +#### GET +##### Summary + +Get workflow execution events stream after resume + +##### Description + +GET /console/api/workflow/{workflow_run_id}/events + +Returns Server-Sent Events stream. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workflow/{workflow_run_id}/pause-details + +#### GET +##### Summary + +Get workflow pause details + +##### Description + +GET /console/api/workflow/{workflow_run_id}/pause-details + +Returns information about why and where the workflow is paused. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /workspaces/current/agent-provider/{provider_name} + +#### GET +##### Description + +Get specific agent provider details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_name | path | Agent provider name | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | object | + +### /workspaces/current/agent-providers + +#### GET +##### Description + +Get list of available agent providers + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ object ] | + +### /workspaces/current/dataset-operators + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/default-model + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGetDefault](#parsergetdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[ParserPostDefault](#parserpostdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/endpoints + +#### POST +##### Description + +Create a new plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/create + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a plugin endpoint. Use POST /workspaces/current/endpoints instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/delete + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for deleting a plugin endpoint. Use DELETE /workspaces/current/endpoints/{id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/disable + +#### POST +##### Description + +Disable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint disabled successfully | [EndpointDisableResponse](#endpointdisableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/enable + +#### POST +##### Description + +Enable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint enabled successfully | [EndpointEnableResponse](#endpointenableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/list + +#### GET +##### Description + +List plugin endpoints with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListQuery](#endpointlistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EndpointListResponse](#endpointlistresponse) | + +### 
/workspaces/current/endpoints/list/plugin + +#### GET +##### Description + +List endpoints for a specific plugin + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListForPluginQuery](#endpointlistforpluginquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [PluginEndpointListResponse](#pluginendpointlistresponse) | + +### /workspaces/current/endpoints/update + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating a plugin endpoint. Use PATCH /workspaces/current/endpoints/{id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LegacyEndpointUpdatePayload](#legacyendpointupdatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/{id} + +#### DELETE +##### Description + +Delete a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Endpoint ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +#### PATCH +##### Description + +Update a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointUpdatePayload](#endpointupdatepayload) | +| id | path | Endpoint ID | Yes | string | + +##### Responses + 
+| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/members + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/members/invite-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MemberInvitePayload](#memberinvitepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/owner-transfer-check + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferCheckPayload](#ownertransfercheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/send-owner-transfer-confirm-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferEmailPayload](#ownertransferemailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/owner-transfer + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [OwnerTransferPayload](#ownertransferpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/update-role + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [MemberRoleUpdatePayload](#memberroleupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserModelList](#parsermodellist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/checkout-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialDelete](#parsercredentialdelete) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserCredentialId](#parsercredentialid) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialCreate](#parsercredentialcreate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialUpdate](#parsercredentialupdate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialSwitch](#parsercredentialswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialValidate](#parsercredentialvalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPostModels](#parserpostmodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteCredential](#parserdeletecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserGetCredentials](#parsergetcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCreateCredential](#parsercreatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | 
-------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserUpdateCredential](#parserupdatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserSwitch](#parserswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserValidate](#parservalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/disable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/enable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/credentials-validate + +#### 
POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/{config_id}/credentials-validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| config_id | path | | Yes | string | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/parameter-rules + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserParameter](#parserparameter) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/preferred-provider-type + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPreferredProviderType](#parserpreferredprovidertype) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/permission + +#### GET +##### Summary + +Get workspace permission settings + +##### Description + +Returns permission flags that control workspace features like member invitations and owner transfer. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/asset + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserAsset](#parserasset) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/debugging-key + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/fetch-manifest + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserIcon](#parsericon) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubInstall](#parsergithubinstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/marketplace + +#### POST 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/pkg + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserList](#parserlist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/installations/ids + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/latest-versions + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/marketplace/pkg + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptions](#parserdynamicoptions) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options-with-credentials + +#### POST +##### Summary + +Fetch dynamic options using credentials directly (for edit mode) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptionsWithCredentials](#parserdynamicoptionswithcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPermissionChange](#parserpermissionchange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/autoupgrade/exclude + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserExcludePlugin](#parserexcludeplugin) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPreferencesChange](#parserpreferenceschange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/readme + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserReadme](#parserreadme) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserTasks](#parsertasks) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/delete_all + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete/{identifier} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| identifier | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/uninstall + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserUninstall](#parseruninstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpgrade](#parsergithubupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/marketplace + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserMarketplaceUpgrade](#parsermarketplaceupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/bundle + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpload](#parsergithubupload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/pkg + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-labels + +#### GET +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderAddPayload](#apitoolprovideraddpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderDeletePayload](#apitoolproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/remote + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/schema + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolSchemaPayload](#apitoolschemapayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/test/pre + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolTestPayload](#apitooltestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + 
+### /workspaces/current/tool-provider/api/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderUpdatePayload](#apitoolproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolAddPayload](#builtintooladdpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/schema/{credential_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| credential_type | path | | Yes | string | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credentials + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/default-credential + +#### POST +##### 
Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinProviderDefaultCredentialPayload](#builtinproviderdefaultcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolCredentialDeletePayload](#builtintoolcredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/client-schema + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ToolOAuthCustomClientPayload](#tooloauthcustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/tools + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolUpdatePayload](#builtintoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderDeletePayload](#mcpproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | 
Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderCreatePayload](#mcpprovidercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderUpdatePayload](#mcpproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/auth + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPAuthPayload](#mcpauthpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/tools/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/update/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/create + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolCreatePayload](#workflowtoolcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success 
| + +### /workspaces/current/tool-provider/workflow/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolDeletePayload](#workflowtooldeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolUpdatePayload](#workflowtoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-providers + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/api + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/builtin + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/mcp + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/workflow + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + 
+##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/info + +#### GET +##### Summary + +Get info for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/oauth/client + +#### DELETE +##### Summary + +Remove custom OAuth client configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Summary + +Get OAuth client configuration for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Configure custom OAuth client for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerOAuthClientPayload](#triggeroauthclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/build/{subscription_builder_id} + +#### POST +##### Summary + +Build a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | 
string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/create + +#### POST +##### Summary + +Add a new subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderCreatePayload](#triggersubscriptionbuildercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/logs/{subscription_builder_id} + +#### GET +##### Summary + +Get the request logs for a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/update/{subscription_builder_id} + +#### POST +##### Summary + +Update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/workspaces/current/trigger-provider/{provider}/subscriptions/builder/verify-and-update/{subscription_builder_id} + +#### POST +##### Summary + +Verify and update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/{subscription_builder_id} + +#### GET +##### Summary + +Get a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/list + +#### GET +##### Summary + +List all trigger subscriptions for the current tenant's provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/oauth/authorize + +#### GET +##### Summary + +Initiate OAuth authorization flow for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/verify/{subscription_id} + +#### POST +##### Summary + +Verify credentials for an existing subscription (edit mode only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/delete + +#### POST +##### Summary + +Delete a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/update + +#### POST +##### Summary + +Update a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/triggers + +#### GET +##### Summary + +List all trigger providers for the current tenant + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkspaceCustomConfigPayload](#workspacecustomconfigpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config/webapp-logo/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/info + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceInfoPayload](#workspaceinfopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SwitchWorkspacePayload](#switchworkspacepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/{tenant_id}/model-providers/{provider}/{icon_type}/{lang} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| icon_type | path | | Yes | string | +| lang | path | | Yes | string | +| provider | path | | Yes | string | +| tenant_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +## default +Default namespace + +### /explore/banners + +#### GET +##### Summary + +Get banner list + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### APIBasedExtensionListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| APIBasedExtensionListResponse | array | | | + +#### APIBasedExtensionPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | API endpoint URL | Yes | +| 
api_key | string | API key for authentication | Yes | +| name | string | Extension name | Yes | + +#### APIBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | | Yes | +| api_key | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| name | string | | Yes | + +#### Account + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| interface_language | | | No | +| interface_theme | | | No | +| is_password_set | boolean | | Yes | +| last_login_at | | | No | +| last_login_ip | | | No | +| name | string | | Yes | +| timezone | | | No | + +#### AccountAvatarPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | | Yes | + +#### AccountAvatarQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | Avatar file ID | Yes | + +#### AccountDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### AccountDeletionFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| feedback | string | | Yes | + +#### AccountInitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | +| invitation_code | | | No | +| timezone | string | | Yes | + +#### AccountIntegrateListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AccountIntegrateResponse](#accountintegrateresponse) ] | | Yes | + +#### AccountIntegrateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| is_bound | 
boolean | | Yes | +| link | | | No | +| provider | string | | Yes | + +#### AccountInterfaceLanguagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | + +#### AccountInterfaceThemePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_theme | string | *Enum:* `"dark"`, `"light"` | Yes | + +#### AccountNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### AccountPasswordPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password | | | No | +| repeat_new_password | string | | Yes | + +#### AccountTimezonePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| timezone | string | | Yes | + +#### AccountWithRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| last_active_at | | | No | +| last_login_at | | | No | +| name | string | | Yes | +| role | string | | Yes | +| status | string | | Yes | + +#### AccountWithRoleList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| accounts | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### ActivateCheckQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| interface_language | string | | Yes | +| name | string | | Yes | +| timezone | string | | Yes | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivationCheckResponse + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| data | | Activation data if valid | No | +| is_valid | boolean | Whether token is valid | Yes | + +#### ActivationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### AdvancedChatWorkflowRunForList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | | No | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| elapsed_time | number | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| id | string | | No | +| message_id | string | | No | +| retry_index | integer | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### AdvancedChatWorkflowRunPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AdvancedChatWorkflowRunForList](#advancedchatworkflowrunforlist) ] | | No | +| has_more | boolean | | No | +| limit | integer | | No | + +#### AdvancedChatWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | | | No | +| parent_message_id | | | No | +| query | string | | No | + +#### AdvancedPromptTemplateQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_mode | string | Application mode | Yes | +| has_context | string | Whether has context | No | +| model_mode | string | Model mode | Yes | +| model_name | string | Model name | Yes | + +#### AgentLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| message_id | string | Message UUID | Yes | + +#### AgentThought + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| chain_id | | | No | +| created_at | | | No | +| files | [ string ] | | Yes | +| id | string | | Yes | +| message_chain_id | | | No | +| message_id | string | | Yes | +| observation | | | No | +| position | integer | | Yes | +| thought | | | No | +| tool | | | No | +| tool_input | | | No | +| tool_labels | [JSONValue](#jsonvalue) | | Yes | + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCountResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| count | integer | Number of annotations | Yes | + +#### AnnotationExportList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | + +#### AnnotationFilePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | Message ID | Yes | + +#### AnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_content | | | No | +| annotation_question | | | No | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | +| score | | | No | +| source | | | No | + +#### AnnotationHitHistoryList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AnnotationHitHistory](#annotationhithistory) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + 
+#### AnnotationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | string | Search keyword | No | +| limit | integer | Page size | No | +| page | integer | Page number | No | + +#### AnnotationReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### AnnotationReplyStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | *Enum:* `"disable"`, `"enable"` | Yes | + +#### AnnotationSettingUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Score threshold | Yes | + +#### ApiKeyAuthBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| credentials | object | | Yes | +| provider | string | | Yes | + +#### ApiKeyItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| last_used_at | | | No | +| token | string | | Yes | +| type | string | | Yes | + +#### ApiKeyList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ApiKeyItem](#apikeyitem) ] | | Yes | + +#### ApiProviderSchemaType + +Enum class for api provider schema type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ApiProviderSchemaType | string | Enum class for api provider schema type. 
| | + +#### ApiToolProviderAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | | Yes | + +#### ApiToolProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| original_provider | string | | Yes | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolSchemaPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| schema | string | | Yes | + +#### ApiToolTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| parameters | object | | Yes | +| provider_name | | | No | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | +| tool_name | string | | Yes | + +#### AppApiStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_api | boolean | Enable or disable API | Yes | + +#### AppDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| 
icon_background | | | No | +| id | string | | Yes | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppDetailKernel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| mode | string | | No | +| name | string | | No | + +#### AppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| api_base_url | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| deleted_tools | [ [DeletedTool](#deletedtool) ] | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| site | | | No | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | boolean | Include secrets in export | No | +| workflow_id | | Specific workflow ID to export | No | + +#### AppExportResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | | Yes | + +#### AppIconPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | Icon data | No | +| icon_background | | Icon background color | No | +| 
icon_type | | Icon type | No | + +#### AppImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | Import mode | Yes | +| name | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### AppListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_created_by_me | | Filter by creator | No | +| limit | integer | Page size (1-100) | No | +| mode | string | App mode filter
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"all"`, `"channel"`, `"chat"`, `"completion"`, `"workflow"` | No | +| name | | Filter by app name | No | +| page | integer | Page number (1-99999) | No | +| tag_ids | | Filter by tag IDs | No | + +#### AppMCPServerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | +| parameters | | | Yes | +| server_code | string | | Yes | +| status | [AppMCPServerStatus](#appmcpserverstatus) | | Yes | +| updated_at | | | No | + +#### AppMCPServerStatus + +AppMCPServer Status Enum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| AppMCPServerStatus | string | AppMCPServer Status Enum | | + +#### AppNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Name to check | Yes | + +#### AppPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [AppPartial](#apppartial) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### AppPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| author_name | | | No | +| create_user_name | | | No | +| created_at | | | No | +| created_by | | | No | +| desc_or_prompt | | | No | +| has_draft_trigger | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppSiteResponse + +| Name | Type | Description | Required | 
+| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| code | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | string | | Yes | +| default_language | string | | Yes | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| privacy_policy | | | No | +| prompt_public | boolean | | Yes | +| show_workflow_steps | boolean | | Yes | +| title | string | | Yes | +| use_icon_as_answer_icon | boolean | | Yes | + +#### AppSiteStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_site | boolean | Enable or disable site | Yes | + +#### AppSiteUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| use_icon_as_answer_icon | | | No | + +#### AppTracePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | Enable or disable tracing | Yes | +| tracing_provider | | Tracing provider | No | + +#### AudioTranscriptResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| text | string | Transcribed text from audio | Yes | + +#### BatchAddNotificationAccountsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notification_id | string | | Yes | +| user_email | [ string ] | List of account email addresses | Yes | + +#### BatchImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | 
----------- | -------- | +| upload_file_id | string | | Yes | + +#### BedrockRetrievalPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| knowledge_id | string | | Yes | +| query | string | | Yes | +| retrieval_setting | [BedrockRetrievalSetting](#bedrockretrievalsetting) | | Yes | + +#### BedrockRetrievalSetting + +Retrieval settings for Amazon Bedrock knowledge base queries. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Minimum relevance score threshold | No | +| top_k | | Maximum number of results to retrieve | No | + +#### BuiltinProviderDefaultCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### BuiltinToolAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | +| type | [CredentialType](#credentialtype) | | Yes | + +#### BuiltinToolCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### BuiltinToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### ButtonStyle + +Button styles for user actions. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ButtonStyle | string | Button styles for user actions. 
| | + +#### ChangeEmailResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_email | string | | Yes | +| token | string | | Yes | + +#### ChangeEmailSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | +| phase | | | No | +| token | | | No | + +#### ChangeEmailValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ChatConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| sort_by | string | Sort field and direction
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query | Yes | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### ChatMessagesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### ChatRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | object | | Yes | +| parent_message_id | | | No | +| query | string | | Yes | +| retriever_from | string | | No | + +#### CheckDependenciesResult + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [PluginDependency](#plugindependency) ] | | No | + +#### CheckEmailUniquePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | + +#### ChildChunkBatchUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunks | [ [ChildChunkUpdateArgs](#childchunkupdateargs) ] | | Yes | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | +| id | | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CodeBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | | Extension data | Yes | +| module | string | Module name | Yes | + +#### CompletionConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | 
string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### CompletionMessageExplorePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| query | string | Query text | No | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### CompletionRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### ComplianceDownloadQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_name | string | Compliance document name | Yes | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConsoleDatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ids | [ string ] | Filter by dataset IDs | No | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### Conversation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotation | | | No | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| read_at | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationAnnotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| account | | | No | +| content | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | + +#### ConversationAnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_create_account | | | No | +| created_at | | | No | +| id | string | | Yes | + +#### ConversationDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| introduction | | | No | +| message_count | integer | | Yes | +| model_config | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | + +#### ConversationMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| status | string | | Yes | + +#### ConversationPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [Conversation](#conversation) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | 
string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | Conversation variables for the draft workflow | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID to filter variables | Yes | + +#### ConversationWithSummary + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| message_count | integer | | Yes | +| model_config | | | No | +| name | string | | Yes | +| read_at | | | No | +| status | string | | Yes | +| status_count | | | No | +| summary_or_query | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationWithSummaryPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [ConversationWithSummary](#conversationwithsummary) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConvertToWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background 
| | | No | +| icon_type | | | No | +| name | | | No | + +#### CopyAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Description for the copied app | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| name | | Name for the copied app | No | + +#### CreateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | Annotation reply data | No | +| answer | | Answer text | No | +| content | | Content text | No | +| message_id | | Message ID | No | +| question | | Question text | No | + +#### CreateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| mode | string | App mode
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"chat"`, `"completion"`, `"workflow"` | Yes | +| name | string | App name | Yes | + +#### CredentialType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| CredentialType | string | | | + +#### DataSource + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| info_list | [InfoList](#infolist) | | Yes | + +#### DataSourceIntegrate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| disabled | boolean | | No | +| id | string | | No | +| is_bound | boolean | | No | +| link | string | | No | +| provider | string | | No | +| source_info | [DataSourceIntegrateWorkspace](#datasourceintegrateworkspace) | | No | + +#### DataSourceIntegrateIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | string | | No | +| type | string | | No | +| url | string | | No | + +#### DataSourceIntegrateList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [DataSourceIntegrate](#datasourceintegrate) ] | | No | + +#### DataSourceIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### DataSourceIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [DataSourceIntegratePage](#datasourceintegratepage) ] | | No | +| total | integer | | No | +| workspace_icon | string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### DatasetAndDocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| batch | string | | Yes | +| dataset | 
[DatasetResponse](#datasetresponse) | | Yes | +| documents | [ [DocumentResponse](#documentresponse) ] | | Yes | + +#### DatasetBase + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| id | string | | No | +| indexing_technique | string | | No | +| name | string | | No | +| permission | string | | No | + +#### DatasetContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| content_type | string | | No | +| file_info | [DatasetFileInfo](#datasetfileinfo) | | No | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | + +#### DatasetDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_count | integer | | No | +| author_name | string | | No | +| built_in_field_enabled | boolean | | No | +| chunk_structure | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| doc_form | string | | No | +| doc_metadata | [ [DatasetDocMetadata](#datasetdocmetadata) ] | | No | +| document_count | integer | | No | +| embedding_available | boolean | | No | +| embedding_model | string | | No | +| embedding_model_provider | string | | No | +| enable_api | boolean | | No | +| external_knowledge_info | [ExternalKnowledgeInfo](#externalknowledgeinfo) | | No | +| external_retrieval_model | [ExternalRetrievalModel](#externalretrievalmodel) | | No | +| icon_info | [DatasetIconInfo](#dataseticoninfo) | | No | +| id | string | | No | +| 
indexing_technique | string | | No | +| is_multimodal | boolean | | No | +| is_published | boolean | | No | +| name | string | | No | +| permission | string | | No | +| pipeline_id | string | | No | +| provider | string | | No | +| retrieval_model_dict | [DatasetRetrievalModel](#datasetretrievalmodel) | | No | +| runtime_mode | string | | No | +| summary_index_setting | [_AnonymousInlineModel_b1954337d565](#_anonymousinlinemodel_b1954337d565) | | No | +| tags | [ [Tag](#tag) ] | | No | +| total_available_documents | integer | | No | +| total_documents | integer | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| word_count | integer | | No | + +#### DatasetDocMetadata + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### DatasetFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | string | | No | +| id | string | | No | +| mime_type | string | | No | +| name | string | | No | +| size | integer | | No | +| source_url | string | | No | + +#### DatasetIconInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | string | | No | + +#### DatasetKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetQueryDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| created_by_role | string | | No | +| id | string | | No | +| queries | [DatasetContent](#datasetcontent) | | No | +| source | string 
| | No | +| source_app_id | string | | No | + +#### DatasetRerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | string | | No | +| reranking_provider_name | string | | No | + +#### DatasetResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| data_source_type | | | No | +| description | | | No | +| id | string | | Yes | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | + +#### DatasetRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_enable | boolean | | No | +| reranking_mode | string | | No | +| reranking_model | [DatasetRerankingModel](#datasetrerankingmodel) | | No | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| search_method | string | | No | +| top_k | integer | | No | +| weights | [DatasetWeightedScore](#datasetweightedscore) | | No | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| icon_info | | | No | +| indexing_technique | | | No | +| is_multimodal | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### DatasetVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | No | +| embedding_provider_name | string | | No | +| vector_weight | number | | No | + +#### DatasetWeightedScore + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | 
[DatasetKeywordSetting](#datasetkeywordsetting) | | No | +| vector_setting | [DatasetVectorSetting](#datasetvectorsetting) | | No | +| weight_type | string | | No | + +#### DatasourceCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### DatasourceCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### DatasourceCredentialUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### DatasourceCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### DatasourceDefaultPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | + +#### DatasourceUpdateNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| name | string | | Yes | + +#### DatasourceVariablesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info | object | | Yes | +| datasource_type | string | | Yes | +| start_node_id | string | | Yes | +| start_node_title | string | | Yes | + +#### DebugPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DebugPermission | string | | | + +#### DefaultBlockConfigQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| q | | | No | 
+ +#### DeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | Yes | +| tool_name | string | | Yes | +| type | string | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentMetadataResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | +| value | | | No | + +#### DocumentMetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_metadata | | | No | +| doc_type | | | No | + +#### DocumentRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### DocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| 
summary_index_status | | | No | +| tokens | | | No | +| word_count | | | No | + +#### DocumentRetryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string ] | | Yes | + +#### DocumentWithSegmentsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| completed_segments | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| process_rule_dict | | | No | +| summary_index_status | | | No | +| tokens | | | No | +| total_segments | | | No | +| word_count | | | No | + +#### DraftWorkflowNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | + +#### DraftWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| start_node_id | string | | Yes | + +#### DraftWorkflowSyncPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | | | No | +| environment_variables | | | No | +| features | | | No | +| graph | object | | Yes | +| hash | | | No | +| rag_pipeline_variables | | | No | + +#### DraftWorkflowTriggerRunAllPayload + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| node_ids | [ string ] | | Yes | + +#### DraftWorkflowTriggerRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### DraftWorkflowTriggerRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | Node ID | Yes | + +#### EducationActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| institution | string | | Yes | +| role | string | | Yes | +| token | string | | Yes | + +#### EducationAutocompleteQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keywords | string | | Yes | +| limit | integer | | No | +| page | integer | | No | + +#### EducationAutocompleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| curr_page | | | No | +| data | [ string ] | | No | +| has_next | | | No | + +#### EducationStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_refresh | | | No | +| expire_at | | | No | +| is_student | | | No | +| result | | | No | + +#### EducationVerifyResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | | | No | + +#### EmailCodeLoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| language | | | No | +| token | string | | Yes | + +#### EmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### EmailRegisterResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### 
EmailRegisterSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| language | | Language code | No | + +#### EmailRegisterValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### EndpointCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| settings | object | | Yes | + +#### EndpointCreateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDeleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDisableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointEnableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointIdPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | + +#### EndpointListForPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | +| plugin_id | string | | Yes | + +#### EndpointListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | + +#### EndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### 
EndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### EndpointUpdateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EnvironmentVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| environment_variables | [ object ] | Environment variables for the draft workflow | Yes | + +#### ExecutionContentType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ExecutionContentType | string | | | + +#### ExternalApiTemplateListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | + +#### ExternalDatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| external_knowledge_api_id | string | | Yes | +| external_knowledge_id | string | | Yes | +| external_retrieval_model | | | No | +| name | string | | Yes | + +#### ExternalHitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_retrieval_model | | | No | +| metadata_filtering_conditions | | | No | +| query | string | | Yes | + +#### ExternalKnowledgeApiPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### ExternalKnowledgeInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_knowledge_api_endpoint | string | | No | +| external_knowledge_api_id | string | | No | +| external_knowledge_api_name | string | | No | +| external_knowledge_id | string | | No | + +#### 
ExternalRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| top_k | integer | | No | + +#### FeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Feature configuration object | No | + +#### Feedback + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| from_account | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| rating | string | | Yes | + +#### FeedbackExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end_date | | End date (YYYY-MM-DD) | No | +| format | string | Export format
*Enum:* `"csv"`, `"json"` | No | +| from_source | | Filter by feedback source | No | +| has_comment | | Only include feedback with comments | No | +| rating | | Filter by rating | No | +| start_date | | Start date (YYYY-MM-DD) | No | + +#### FeedbackStat + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dislike | integer | | Yes | +| like | integer | | Yes | + +#### FileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_ids | [ string ] | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordCheckResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| is_valid | boolean | Whether code is valid | Yes | +| token | string | New reset token | Yes | + +#### ForgotPasswordEmailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | | Error code if account not found | No | +| data | | Reset token | No | +| result | string | Operation result | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetResponse + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### FormInput + +Form input definition. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| default | | | No | +| output_variable_name | string | | Yes | +| type | [FormInputType](#forminputtype) | | Yes | + +#### FormInputDefault + +Default configuration for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| selector | [ string ] | | No | +| type | [PlaceholderType](#placeholdertype) | | Yes | +| value | string | | No | + +#### FormInputType + +Form input types. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| FormInputType | string | Form input types. | | + +#### GenerateSummaryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_list | [ string ] | | Yes | + +#### Github + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| github_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### HitTestingChildChunk + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| id | | | No | +| position | | | No | +| score | | | No | + +#### HitTestingDocument + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | | | No | +| doc_metadata | | | No | +| doc_type | | | No | +| id | | | No | +| name | | | No | + +#### HitTestingFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | | | No | +| id | | | No | +| mime_type | | | No | +| name | | | No | 
+| size | | | No | +| source_url | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HitTestingRecord + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| child_chunks | [ [HitTestingChildChunk](#hittestingchildchunk) ] | | No | +| files | [ [HitTestingFile](#hittestingfile) ] | | No | +| score | | | No | +| segment | | | No | +| summary | | | No | +| tsne_position | | | No | + +#### HitTestingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| query | string | | Yes | +| records | [ [HitTestingRecord](#hittestingrecord) ] | | No | + +#### HitTestingSegment + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| completed_at | | | No | +| content | | | No | +| created_at | | | No | +| created_by | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| document | | | No | +| document_id | | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | | | No | +| index_node_hash | | | No | +| index_node_id | | | No | +| indexing_at | | | No | +| keywords | [ string ] | | No | +| position | | | No | +| sign_content | | | No | +| status | | | No | +| stopped_at | | | No | +| tokens | | | No | +| word_count | | | No | + +#### HumanInputContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| form_definition | | | No | +| form_submission_data | | | No | +| submitted | boolean | | Yes | +| type | [ExecutionContentType](#executioncontenttype) | | No | +| workflow_run_id | string | | Yes | + +#### HumanInputDeliveryTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| delivery_method_id | string | Delivery method ID | Yes | 
+| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormDefinition + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| actions | [ [UserAction](#useraction) ] | | No | +| display_in_ui | boolean | | No | +| expiration_time | integer | | Yes | +| form_content | string | | Yes | +| form_id | string | | Yes | +| form_token | | | No | +| inputs | [ [FormInput](#forminput) ] | | No | +| node_id | string | | Yes | +| node_title | string | | Yes | +| resolved_default_values | object | | No | + +#### HumanInputFormPreviewPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormSubmissionData + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action_id | string | | Yes | +| action_text | string | | Yes | +| node_id | string | | Yes | +| node_title | string | | Yes | +| rendered_content | string | | Yes | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | Selected action ID | Yes | +| form_inputs | object | Values the user provides for the form's own fields | Yes | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | Yes | + +#### IconType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| IconType | string | | | + +#### Import + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| app_mode | | | No | +| current_dsl_version | string | | No | +| error | string | | No | +| id | string | | Yes | +| imported_dsl_version | string | | No | +| status | [ImportStatus](#importstatus) | | Yes | + +#### ImportStatus + +| Name | Type | Description | Required | +| 
---- | ---- | ----------- | -------- | +| ImportStatus | string | | | + +#### IncludeSecretQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | string | | No | + +#### IndexingEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dataset_id | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| indexing_technique | string | | Yes | +| info_list | object | | Yes | +| process_rule | object | | Yes | + +#### InfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | string | *Enum:* `"notion_import"`, `"upload_file"`, `"website_crawl"` | Yes | +| file_info_list | | | No | +| notion_info_list | | | No | +| website_info_list | | | No | + +#### Inner + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | | | No | +| model_type | [ModelType](#modeltype) | | Yes | +| provider | | | No | + +#### InsertExploreAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| can_trial | boolean | | No | +| category | string | | Yes | +| copyright | | | No | +| custom_disclaimer | | | No | +| desc | | | No | +| language | string | | Yes | +| position | integer | | Yes | +| privacy_policy | | | No | +| trial_limit | integer | | No | + +#### InsertExploreBannerPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| description | string | | Yes | +| img-src | string | | Yes | +| language | string | | No | +| link | string | | Yes | +| sort | integer | | Yes | +| title | string | | Yes | + +#### InstallPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| InstallPermission | string | | | + +#### InstalledAppCreatePayload + +| Name | Type | Description | Required | +| ---- | 
---- | ----------- | -------- | +| app_id | string | | Yes | + +#### InstalledAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | +| use_icon_as_answer_icon | | | No | + +#### InstalledAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| installed_apps | [ [InstalledAppResponse](#installedappresponse) ] | | Yes | + +#### InstalledAppResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app | [InstalledAppInfoResponse](#installedappinforesponse) | | Yes | +| app_owner_tenant_id | string | | Yes | +| editable | boolean | | Yes | +| id | string | | Yes | +| is_pinned | boolean | | Yes | +| last_used_at | | | No | +| uninstallable | boolean | | Yes | + +#### InstalledAppUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_pinned | | | No | + +#### InstalledAppsListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | App ID to filter by | No | + +#### InstructionGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current | string | Current instruction text | No | +| flow_id | string | Workflow/Flow ID | Yes | +| ideal_output | string | Expected ideal output | No | +| instruction | string | Instruction for generation | Yes | +| language | string | Programming language (javascript/python) | No | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| node_id | string | Node ID for workflow context | No | + +#### InstructionTemplatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | Instruction template type | Yes | + +#### IterationNodeRunPayload + +| Name 
| Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### JSONValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JSONValue | | | | + +#### KnowledgeConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| duplicate | boolean | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | string | *Enum:* `"economy"`, `"high_quality"` | Yes | +| is_multimodal | boolean | | No | +| name | | | No | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### LLMMode + +Enum class for large language model mode. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| LLMMode | string | Enum class for large language model mode. | | + +#### LangContentPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| body | string | | Yes | +| lang | string | Language tag: 'zh' \| 'en' \| 'jp' | Yes | +| subtitle | | | No | +| title | string | | Yes | +| title_pic_url | | | No | + +#### LegacyEndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | +| name | string | | Yes | +| settings | object | | Yes | + +#### LoadBalancingCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### LoadBalancingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| configs | | | No | +| enabled | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | 
+| email | string | | Yes | +| invite_token | | Invitation token | No | +| password | string | | Yes | +| remember_me | boolean | Remember me flag | No | + +#### LoopNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### MCPAuthPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authorization_code | | | No | +| provider_id | string | | Yes | + +#### MCPProviderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | Yes | + +#### MCPProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| provider_id | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPServerCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| parameters | object | Server parameters configuration | Yes | + +#### MCPServerUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| id | string | Server ID | Yes | +| parameters | object | Server parameters configuration | Yes | +| status | | Server status | No | + +#### Marketplace + +| Name | Type 
| Description | Required | +| ---- | ---- | ----------- | -------- | +| marketplace_plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### MemberInvitePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emails | [ string ] | | No | +| language | | | No | +| role | [TenantAccountRole](#tenantaccountrole) | | Yes | + +#### MemberRoleUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| role | string | | Yes | + +#### MessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | Yes | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | integer | | Yes | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| feedbacks | [ [Feedback](#feedback) ] | | Yes | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | [JSONValue](#jsonvalue) | | Yes | +| message_files | [ [MessageFile](#messagefile) ] | | Yes | +| message_metadata_dict | [JSONValue](#jsonvalue) | | Yes | +| message_tokens | integer | | Yes | +| parent_message_id | | | No | +| provider_response_latency | number | | Yes | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageDetailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | No | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | | | No | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| extra_contents | [ [HumanInputContent](#humaninputcontent) ] | | No | +| feedbacks | [ [Feedback](#feedback) ] | | No | +| from_account_id | | | 
No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | | | No | +| message_files | [ [MessageFile](#messagefile) ] | | No | +| message_metadata_dict | | | No | +| message_tokens | | | No | +| parent_message_id | | | No | +| provider_response_latency | | | No | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| message_id | string | Message ID | Yes | +| rating | | | No | + +#### MessageFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| belongs_to | | | No | +| filename | string | | Yes | +| id | string | | Yes | +| mime_type | | | No | +| size | | | No | +| transfer_method | string | | Yes | +| type | string | | Yes | +| upload_file_id | | | No | +| url | | | No | + +#### MessageInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [MessageDetailResponse](#messagedetailresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + 
+Metadata Filtering Condition. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### ModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| completion_params | object | | No | +| mode | [LLMMode](#llmmode) | | Yes | +| name | string | | Yes | +| provider | string | | Yes | + +#### ModelConfigPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| model_dict | | | No | +| pre_prompt | | | No | +| updated_at | | | No | +| updated_by | | | No | + +#### ModelConfigRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | | Agent mode configuration | No | +| configs | | Model configuration parameters | No | +| dataset_configs | | Dataset configurations | No | +| model | | Model name | No | +| more_like_this | | More like this configuration | No | +| opening_statement | | Opening statement | No | +| provider | | Model provider | No | +| retrieval_model | | Retrieval model configuration | No | +| speech_to_text | | Speech to text configuration | No | +| suggested_questions | | Suggested questions | No | +| text_to_speech | | Text to speech configuration | No | +| tools | | Available tools | No | + +#### ModelType + +Enum class for model type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ModelType | string | Enum class for model type. 
| | + +#### MoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | Yes | + +#### NodeIdQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### NodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### NodeRunRequiredPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | | Yes | + +#### NotionEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| notion_info_list | [ object ] | | Yes | +| process_rule | object | | Yes | + +#### NotionIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | | | No | +| type | string | | Yes | +| url | | | No | + +#### NotionInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| pages | [ [NotionPage](#notionpage) ] | | Yes | +| workspace_id | string | | Yes | + +#### NotionIntegrateInfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notion_info | [ [NotionIntegrateWorkspace](#notionintegrateworkspace) ] | | No | + +#### NotionIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_bound | boolean | | No | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### NotionIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [NotionIntegratePage](#notionintegratepage) ] | | No | +| workspace_icon 
| string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### NotionPage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | | | No | +| page_id | string | | Yes | +| page_name | string | | Yes | +| type | string | | Yes | + +#### OAuthDataSourceBindingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OAuthDataSourceResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | Authorization URL or 'internal' for internal setup | Yes | + +#### OAuthDataSourceSyncResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OwnerTransferCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### OwnerTransferEmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | | No | + +#### OwnerTransferPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | string | | Yes | + +#### Package + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### PaginatedConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### Parser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | 
Yes | + +#### ParserAsset + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_name | string | | Yes | +| plugin_unique_identifier | string | | Yes | + +#### ParserCreateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserCredentialCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialDelete + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialId + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | + +#### ParserCredentialSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### ParserDeleteCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDeleteModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDynamicOptions + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | 
| | No | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | +| provider_type | string | *Enum:* `"tool"`, `"trigger"` | Yes | + +#### ParserDynamicOptionsWithCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | + +#### ParserEnable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_trigger | boolean | | Yes | +| trigger_id | string | | Yes | + +#### ParserExcludePlugin + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_id | string | | Yes | + +#### ParserGetCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGetDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGithubInstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### 
ParserIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| filename | string | | Yes | +| tenant_id | string | | Yes | + +#### ParserLatest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_ids | [ string ] | | Yes | + +#### ParserList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserMarketplaceUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | + +#### ParserModelList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | | | No | + +#### ParserParameter + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | + +#### ParserPermissionChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | Yes | +| install_permission | [InstallPermission](#installpermission) | | Yes | + +#### ParserPluginIdentifierQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | + +#### ParserPluginIdentifiers + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifiers | [ string ] | | Yes | + +#### ParserPostDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_settings | [ [Inner](#inner) ] | | Yes | + +#### ParserPostModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| load_balancing | | | No | +| model | string | | Yes | +| model_type | 
[ModelType](#modeltype) | | Yes | + +#### ParserPreferencesChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_upgrade | [PluginAutoUpgradeSettingsPayload](#pluginautoupgradesettingspayload) | | Yes | +| permission | [PluginPermissionSettingsPayload](#pluginpermissionsettingspayload) | | Yes | + +#### ParserPreferredProviderType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| preferred_provider_type | string | *Enum:* `"custom"`, `"system"` | Yes | + +#### ParserReadme + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | | No | +| plugin_unique_identifier | string | | Yes | + +#### ParserSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserTasks + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserUninstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_installation_id | string | | Yes | + +#### ParserUpdateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### PartnerTenantsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| click_id | string | Click ID from partner referral link | Yes | + +#### 
Payload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon_info | | | No | +| name | string | | Yes | + +#### PipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### PlaceholderType + +Default value types for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| PlaceholderType | string | Default value types for form inputs. | | + +#### PluginAutoUpgradeSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| exclude_plugins | [ string ] | | No | +| include_plugins | [ string ] | | No | +| strategy_setting | [StrategySetting](#strategysetting) | | No | +| upgrade_mode | [UpgradeMode](#upgrademode) | | No | +| upgrade_time_of_day | integer | | No | + +#### PluginDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | | | No | +| type | [Type](#type) | | Yes | +| value | | | Yes | + +#### PluginEndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### PluginPermissionSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | No | +| install_permission | 
[InstallPermission](#installpermission) | | No | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### PublishWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### PublishedWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_preview | boolean | | No | +| original_document_id | | | No | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | No | +| start_node_id | string | | Yes | + +#### RagPipelineDatasetImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| yaml_content | string | | Yes | + +#### RagPipelineImport + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_dsl_version | string | | No | +| dataset_id | string | | No | +| error | string | | No | +| id | string | | No | +| imported_dsl_version | string | | No | +| pipeline_id | string | | No | +| status | string | | No | + +#### RagPipelineImportCheckDependencies + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [RagPipelineLeakedDependency](#ragpipelineleakeddependency) ] | | No | + +#### RagPipelineImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | | Yes | +| name | | | No | +| 
pipeline_id | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### RagPipelineLeakedDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | string | | No | +| type | string | | No | +| value | object | | No | + +#### RagPipelineRecommendedPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | | No | + +#### RecommendedAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | + +#### RecommendedAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| categories | [ string ] | | Yes | +| recommended_apps | [ [RecommendedAppResponse](#recommendedappresponse) ] | | Yes | + +#### RecommendedAppResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app | | | No | +| app_id | string | | Yes | +| can_trial | | | No | +| categories | [ string ] | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| description | | | No | +| is_listed | | | No | +| position | | | No | +| privacy_policy | | | No | + +#### RecommendedAppsQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | | No | + +#### RelatedAppList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AppDetailKernel](#appdetailkernel) ] | | No | +| total | integer | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### ResultResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | | Yes | + +#### 
RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | + +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### RuleCodeGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code_language | string | Programming language for code generation | No | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleStructuredOutputPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Structured output generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### SavedMessageListQuery + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | string | | No | +| hit_count_gte | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | [ string ] | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### SimpleMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | | Yes | +| inputs | object | | Yes | +| message | string | | Yes | +| query | string | | Yes | + +#### SimpleModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_dict | | | No | +| pre_prompt | | | No | + +#### Site + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| app_base_url | | | No | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| code | | | No | +| copyright | | | No | +| created_at | | | No | +| created_by | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | + +#### StatisticTimeRangeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### StatusCount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | Yes | +| partial_success | integer | | Yes | +| paused | integer | | Yes | +| success | integer | | Yes | + +#### StrategySetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| StrategySetting | string | | | + +#### SubscriptionQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interval | string | Billing interval
*Enum:* `"month"`, `"year"` | Yes | +| plan | string | Subscription plan
*Enum:* `"professional"`, `"team"` | Yes | + +#### SuggestedQuestionsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ string ] | Suggested question | Yes | + +#### SwitchWorkspacePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tenant_id | string | | Yes | + +#### SyncDraftWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | | No | +| environment_variables | [ object ] | | No | +| features | object | | Yes | +| graph | object | | Yes | +| hash | | | No | + +#### SyncDraftWorkflowResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| hash | string | | No | +| result | string | | No | +| updated_at | string | | No | + +#### SystemFeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | System feature configuration object | No | + +#### Tag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### TagBasePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Tag name | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to bind | Yes | +| target_id | string | Target ID to bind tags to | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingRemovePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to remove | Yes | +| target_id | string | Target ID to unbind tag from | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagListQueryParam + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| type | string | Tag type filter
*Enum:* `""`, `"app"`, `"knowledge"` | No | + +#### TagResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | | | No | + +#### TagType + +Tag type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TagType | string | Tag type | | + +#### TenantAccountRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TenantAccountRole | string | | | + +#### TenantInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| custom_config | | | No | +| id | string | | Yes | +| in_trial | | | No | +| name | | | No | +| next_credit_reset_date | | | No | +| plan | | | No | +| role | | | No | +| status | | | No | +| trial_credits | | | No | +| trial_credits_used | | | No | +| trial_end_reason | | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### TextToSpeechPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Whether to stream audio | No | +| text | string | Text to convert | Yes | +| voice | | Voice name | No | + +#### TextToSpeechRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | | No | +| streaming | | | No | +| text | | | No | +| voice | | | No | + +#### TextToSpeechVoiceQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | Language code | Yes | + +#### ToolOAuthCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### ToolParameterForm + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ToolParameterForm | string | | | + +#### TraceConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_config | object | Tracing configuration data | Yes | +| tracing_provider | string | Tracing provider name | Yes | + +#### TraceProviderQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_provider | string | Tracing provider name | Yes | + +#### TrialAppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | string | | No | +| api_base_url | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| deleted_tools | [ [TrialDeletedTool](#trialdeletedtool) ] | | No | +| description | string | | No | +| enable_api | boolean | | No | +| enable_site | boolean | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| max_active_requests | integer | | No | +| mode | string | | No | +| model_config | [TrialAppModelConfig](#trialappmodelconfig) | | No | +| name | string | | No | +| site | [TrialSite](#trialsite) | | No | +| tags | [ [TrialTag](#trialtag) ] | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | +| workflow | [TrialWorkflowPartial](#trialworkflowpartial) | | No | + +#### TrialAppModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | object | | No | +| annotation_reply | object | | No | +| chat_prompt_config | object | | No | +| completion_prompt_config | object | | No | +| created_at | object | | No | +| created_by | string | | No | +| dataset_configs | object 
| | No | +| dataset_query_variable | string | | No | +| external_data_tools | object | | No | +| file_upload | object | | No | +| model | object | | No | +| more_like_this | object | | No | +| opening_statement | string | | No | +| pre_prompt | string | | No | +| prompt_type | string | | No | +| retriever_resource | object | | No | +| sensitive_word_avoidance | object | | No | +| speech_to_text | object | | No | +| suggested_questions | object | | No | +| suggested_questions_after_answer | object | | No | +| text_to_speech | object | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| user_input_form | object | | No | + +#### TrialConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### TrialDeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | No | +| tool_name | string | | No | +| type | string | | No | + +#### TrialPipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### TrialSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_token | string | | No | +| app_base_url | string | | No | +| chat_color_theme | string | | No | +| chat_color_theme_inverted | boolean | | No | +| code | string | | No | +| 
copyright | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| custom_disclaimer | string | | No | +| customize_domain | string | | No | +| customize_token_strategy | string | | No | +| default_language | string | | No | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| privacy_policy | string | | No | +| prompt_public | boolean | | No | +| show_workflow_steps | boolean | | No | +| title | string | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | + +#### TrialTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### TrialWorkflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [TrialConversationVariable](#trialconversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [SimpleAccount](#simpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [TrialPipelineVariable](#trialpipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [SimpleAccount](#simpleaccount) | | No | +| version | string | | No | + +#### TrialWorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| id | string | | No | +| updated_at | object | | No | +| updated_by | string | | No | + +#### TriggerOAuthClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| 
client_params | | | No | +| enabled | | | No | + +#### TriggerSubscriptionBuilderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_type | string | | No | + +#### TriggerSubscriptionBuilderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | | | No | +| name | | | No | +| parameters | | | No | +| properties | | | No | + +#### TriggerSubscriptionBuilderVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### Type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| Type | string | | | + +#### UpdateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | | No | +| answer | | | No | +| content | | | No | +| question | | | No | + +#### UpdateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| max_active_requests | | Maximum active requests | No | +| name | string | App name | Yes | +| use_icon_as_answer_icon | | Use icon as answer icon | No | + +#### UpgradeMode + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| UpgradeMode | string | | | + +#### UploadConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_image_file_size_limit | | | No | +| audio_file_size_limit | integer | | Yes | +| batch_count_limit | integer | | Yes | +| file_size_limit | integer | | Yes | +| file_upload_limit | | | No | +| image_file_batch_limit | integer | | Yes | +| image_file_size_limit | integer | | Yes | +| single_chunk_attachment_limit | integer | | Yes | +| 
video_file_size_limit | integer | | Yes | +| workflow_file_upload_limit | integer | | Yes | + +#### UpsertNotificationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| contents | [ [LangContentPayload](#langcontentpayload) ] | | Yes | +| end_time | | RFC3339, e.g. 2026-03-20T23:59:59Z | No | +| frequency | string | 'once' \| 'every_page_load' | No | +| notification_id | | Omit to create; supply UUID to update | No | +| start_time | | RFC3339, e.g. 2026-03-01T00:00:00Z | No | +| status | string | 'active' \| 'inactive' | No | + +#### UserAction + +User action configuration. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| button_style | [ButtonStyle](#buttonstyle) | | No | +| id | string | | Yes | +| title | string | | Yes | + +#### WebhookTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| node_id | string | | Yes | +| webhook_debug_url | string | | Yes | +| webhook_id | string | | Yes | +| webhook_url | string | | Yes | + +#### WebsiteCrawlPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| options | object | | Yes | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | +| url | string | | Yes | + +#### WebsiteCrawlStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | + +#### WebsiteInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| job_id | string | | Yes | +| only_main_content | boolean | | No | +| provider | string | | Yes | +| urls | [ string ] | | Yes | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| 
Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### Workflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [ConversationVariable](#conversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [SimpleAccount](#simpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [PipelineVariable](#pipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [SimpleAccount](#simpleaccount) | | No | +| version | string | | No | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | +| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | | No | + +#### WorkflowAppLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | Filter logs created after this 
timestamp | No | +| created_at__before | | Filter logs created before this timestamp | No | +| created_by_account | | Filter by account | No | +| created_by_end_user_session_id | | Filter by end user session ID | No | +| detail | boolean | Whether to return detailed logs | No | +| keyword | | Search keyword for filtering logs | No | +| limit | integer | Number of items per page (1-100) | No | +| page | integer | Page number (1-99999) | No | +| status | | Execution status filter (succeeded, failed, stopped, partial-succeeded) | No | + +#### WorkflowArchivedLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowArchivedLogPartialResponse](#workflowarchivedlogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowArchivedLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| id | string | | Yes | +| trigger_metadata | | | No | +| workflow_run | | | No | + +#### WorkflowCommentBasic + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mention_count | integer | | No | +| participants | [ [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| reply_count | integer | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | 
No | + +#### WorkflowCommentCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | +| position_x | number | Comment X position | Yes | +| position_y | number | Comment Y position | Yes | + +#### WorkflowCommentDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mentions | [ [_AnonymousInlineModel_f7ff64cce858](#_anonymousinlinemodel_f7ff64cce858) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| replies | [ [_AnonymousInlineModel_55c39c6a4b9e](#_anonymousinlinemodel_55c39c6a4b9e) ] | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | No | + +#### WorkflowCommentMentionUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| users | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### WorkflowCommentReplyCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Reply content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | + +#### WorkflowCommentReplyUpdate + +| Name | 
Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentResolve + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | + +#### WorkflowCommentUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | | Mentioned user IDs. Omit to keep existing mentions. | No | +| position_x | | Comment X position | No | +| position_y | | Comment Y position | No | + +#### WorkflowDraftEnvVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftEnvVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftEnvVariable](#workflowdraftenvvariable) ] | | No | + +#### WorkflowDraftVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| full_content | object | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value | object | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- 
| +| items | [ [WorkflowDraftVariable](#workflowdraftvariable) ] | | No | + +#### WorkflowDraftVariableListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Items per page | No | +| page | integer | Page number | No | + +#### WorkflowDraftVariableListWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftVariableWithoutValue](#workflowdraftvariablewithoutvalue) ] | | No | +| total | object | | No | + +#### WorkflowDraftVariablePatchPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | | No | +| value | | | No | + +#### WorkflowDraftVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | Variable name | No | +| value | | Variable value | No | + +#### WorkflowDraftVariableWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowExecutionStatus + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| WorkflowExecutionStatus | string | | | + +#### WorkflowFeaturesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Workflow feature configuration | Yes | + +#### WorkflowListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| named_only | boolean | | No | +| page | integer | | No | +| user_id | | | No | + +#### WorkflowOnlineUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_ids | [ 
string ] | App IDs | No | + +#### WorkflowPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_more | boolean | | No | +| items | [ [Workflow](#workflow) ] | | No | +| limit | integer | | No | +| page | integer | | No | + +#### WorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| id | string | | Yes | +| updated_at | | | No | +| updated_by | | | No | + +#### WorkflowRunCount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | No | +| partial_succeeded | integer | | No | +| running | integer | | No | +| stopped | integer | | No | +| succeeded | integer | | No | +| total | integer | | No | + +#### WorkflowRunCountQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | | Workflow run status filter | No | +| time_range | | Time range filter (e.g., 7d, 4h, 30m, 30s) | No | +| triggered_from | | Filter by trigger source: debugging or app-run | No | + +#### WorkflowRunDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| created_by_end_user | [SimpleEndUser](#simpleenduser) | | No | +| created_by_role | string | | No | +| elapsed_time | number | | No | +| error | string | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| graph | object | | No | +| id | string | | No | +| inputs | object | | No | +| outputs | object | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### WorkflowRunExport + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| presigned_url | string | Pre-signed URL for download | No | +| presigned_url_expires_at | 
string | Pre-signed URL expiration time | No | +| status | string | Export status: success/failed | No | + +#### WorkflowRunForArchivedLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| elapsed_time | | | No | +| id | string | | Yes | +| status | | | No | +| total_tokens | | | No | +| triggered_from | | | No | + +#### WorkflowRunForList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| elapsed_time | number | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| id | string | | No | +| retry_index | integer | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last run ID for pagination | No | +| limit | integer | Number of items per page (1-100) | No | +| status | | Workflow run status filter | No | +| triggered_from | | Filter by trigger source: debugging or app-run | No | + +#### WorkflowRunNodeExecution + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| created_by_end_user | [SimpleEndUser](#simpleenduser) | | No | +| created_by_role | string | | No | +| elapsed_time | number | | No | +| error | string | | No | +| 
execution_metadata | object | | No | +| extras | object | | No | +| finished_at | object | | No | +| id | string | | No | +| index | integer | | No | +| inputs | object | | No | +| inputs_truncated | boolean | | No | +| node_id | string | | No | +| node_type | string | | No | +| outputs | object | | No | +| outputs_truncated | boolean | | No | +| predecessor_node_id | string | | No | +| process_data | object | | No | +| process_data_truncated | boolean | | No | +| status | string | | No | +| title | string | | No | + +#### WorkflowRunNodeExecutionList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunNodeExecution](#workflowrunnodeexecution) ] | | No | + +#### WorkflowRunPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunForList](#workflowrunforlist) ] | | No | +| has_more | boolean | | No | +| limit | integer | | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### WorkflowRunQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### WorkflowRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### WorkflowStatisticQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date and time (YYYY-MM-DD HH:MM) | No | +| start | | Start date and time (YYYY-MM-DD HH:MM) | No | + +#### WorkflowToolCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ 
[WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_app_id | string | | Yes | + +#### WorkflowToolDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| workflow_tool_id | string | | Yes | + +#### WorkflowToolParameterConfiguration + +Workflow tool configuration + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | The description of the parameter | Yes | +| form | [ToolParameterForm](#toolparameterform) | The form of the parameter | Yes | +| name | string | The name of the parameter | Yes | + +#### WorkflowToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ [WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_tool_id | string | | Yes | + +#### WorkflowTriggerListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowTriggerResponse](#workflowtriggerresponse) ] | | Yes | + +#### WorkflowTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| icon | string | | Yes | +| id | string | | Yes | +| node_id | string | | Yes | +| provider_name | string | | Yes | +| status | string | | Yes | +| title | string | | Yes | +| trigger_type | string | | Yes | +| updated_at | | | No | + +#### WorkflowUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### WorkspaceCustomConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| remove_webapp_brand | 
| | No | +| replace_webapp_logo | | | No | + +#### WorkspaceInfoPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### WorkspaceListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| page | integer | | No | + +#### _AnonymousInlineModel_55c39c6a4b9e + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | + +#### _AnonymousInlineModel_6fec07cd0d85 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar_url | object | | No | +| email | string | | No | +| id | string | | No | +| name | string | | No | + +#### _AnonymousInlineModel_b1954337d565 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable | boolean | | No | +| model_name | string | | No | +| model_provider_name | string | | No | +| summary_prompt | string | | No | + +#### _AnonymousInlineModel_f7ff64cce858 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mentioned_user_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| mentioned_user_id | string | | No | +| reply_id | string | | No | + +## FastOpenAPI Preview (OpenAPI 3.0) + +### Dify API (FastOpenAPI PoC) +FastOpenAPI proof of concept for Dify API + +#### Version: 1.0 + +--- + +##### [GET] /console/api/init +**Get initialization validation status.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [InitStatusResponse](#initstatusresponse)
| + +##### [POST] /console/api/init +**Validate initialization password.** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [InitValidatePayload](#initvalidatepayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [InitValidateResponse](#initvalidateresponse)
| + +##### [GET] /console/api/ping +**Health check endpoint for connection testing.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [PingResponse](#pingresponse)
| + +##### [GET] /console/api/setup +**Get system setup status. + + NOTE: This endpoint is unauthenticated by design. + + During first-time bootstrap there is no admin account yet, so frontend initialization must be + able to query setup progress before any login flow exists. + + Only bootstrap-safe status information should be returned by this endpoint. + ** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [SetupStatusResponse](#setupstatusresponse)
| + +##### [POST] /console/api/setup +**Initialize system setup with admin account. + + NOTE: This endpoint is unauthenticated by design for first-time bootstrap. + Access is restricted by deployment mode (`SELF_HOSTED`), one-time setup guards, + and init-password validation rather than user session authentication. + ** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [SetupRequestPayload](#setuprequestpayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [SetupResponse](#setupresponse)
| + +##### [GET] /console/api/version +**Check for application version updates.** + +###### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| current_version | query | | Yes | string | + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [VersionResponse](#versionresponse)
| + +--- +##### Schemas + +###### ErrorSchema + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| error | { **"details"**: string, **"message"**: string, **"status"**: integer, **"type"**: string } | | Yes | + +###### InitStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | string,
**Available values:** "finished", "not_started" | Initialization status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### InitValidatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| password | string | Initialization password | Yes | + +###### InitValidateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +###### PingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Health check result | Yes | + +###### SetupRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Admin email address | Yes | +| language | | Admin language | No | +| name | string | Admin name (max 30 characters) | Yes | +| password | string | Admin password | Yes | + +###### SetupResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Setup result | Yes | + +###### SetupStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| setup_at | | Setup completion time (ISO format) | No | +| step | string,
**Available values:** "finished", "not_started" | Setup step status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### VersionFeatures + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_replace_logo | boolean | Whether logo replacement is supported | Yes | +| model_load_balancing_enabled | boolean | Whether model load balancing is enabled | Yes | + +###### VersionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_auto_update | boolean | Whether auto-update is supported | Yes | +| features | [VersionFeatures](#versionfeatures) | Feature flags and capabilities | Yes | +| release_date | string | Release date of latest version | Yes | +| release_notes | string | Release notes for latest version | Yes | +| version | string | Latest version number | Yes | diff --git a/api/openapi/markdown/service-swagger.md b/api/openapi/markdown/service-swagger.md new file mode 100644 index 0000000000..ec5ed280f5 --- /dev/null +++ b/api/openapi/markdown/service-swagger.md @@ -0,0 +1,2754 @@ +# Service API +API for application services + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## service_api +Service operations + +### / + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/feedbacks + +#### GET +##### Summary + +Get all feedbacks for the application + +##### Description + +Get all feedbacks for the application +Returns paginated list of all feedback submitted for messages in this app. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackListQuery](#feedbacklistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedbacks retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action} + +#### POST +##### Summary + +Enable or disable annotation reply feature + +##### Description + +Enable or disable annotation reply feature + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyActionPayload](#annotationreplyactionpayload) | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action}/status/{job_id} + +#### GET +##### Summary + +Get the status of an annotation reply action job + +##### Description + +Get the status of an annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Job not found | + +### /apps/annotations + +#### GET +##### Summary + +List annotations for the application + +##### Description + +List annotations for the application + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations retrieved successfully | [AnnotationList](#annotationlist) | +| 401 | Unauthorized - invalid API token | | + +#### POST 
+##### Summary + +Create a new annotation + +##### Description + +Create a new annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | + +### /apps/annotations/{annotation_id} + +#### DELETE +##### Summary + +Delete an annotation + +##### Description + +Delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Annotation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Annotation not found | + +#### PUT +##### Summary + +Update an existing annotation + +##### Description + +Update an existing annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | +| 403 | Forbidden - insufficient permissions | | +| 404 | Annotation not found | | + +### /audio-to-text + +#### POST +##### Summary + +Convert audio to text using speech-to-text + +##### Description + +Convert audio to text using speech-to-text +Accepts an audio file upload and returns the transcribed text. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Audio successfully transcribed | +| 400 | Bad request - no audio or invalid audio | +| 401 | Unauthorized - invalid API token | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal server error | + +### /chat-messages + +#### POST +##### Summary + +Send a message in a chat conversation + +##### Description + +Send a message in a chat conversation +This endpoint handles chat messages for chat, agent chat, and advanced chat applications. +Supports conversation management and both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatRequestPayload](#chatrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message sent successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running chat message generation + +##### Description + +Stop a running chat message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /completion-messages + +#### POST +##### Summary + +Create a completion for the given prompt + +##### Description + +Create a completion for the given prompt +This endpoint generates a completion based on the provided inputs and query. 
+Supports both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionRequestPayload](#completionrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | +| 500 | Internal server error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running completion task + +##### Description + +Stop a running completion task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /conversations + +#### GET +##### Summary + +List all conversations for the current user + +##### Description + +List all conversations for the current user +Supports pagination using last_id and limit parameters. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversations retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Last conversation not found | + +### /conversations/{c_id} + +#### DELETE +##### Summary + +Delete a specific conversation + +##### Description + +Delete a specific conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/name + +#### POST +##### Summary + +Rename a conversation or auto-generate a name + +##### Description + +Rename a conversation or auto-generate a name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/variables + +#### GET +##### Summary + +List all variables for a conversation + +##### Description + +List all variables for a conversation +Conversational variables are only available for chat applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variables retrieved successfully | [ConversationVariableInfiniteScrollPaginationResponse](#conversationvariableinfinitescrollpaginationresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation not found | | + +### /conversations/{c_id}/variables/{variable_id} + +#### PUT +##### Summary + +Update a conversation variable's value + +##### Description + +Update a conversation variable's value +Allows updating the value of a specific conversation variable. +The value must match the variable's expected type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| c_id | path | Conversation ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | [ConversationVariableResponse](#conversationvariableresponse) | +| 400 | Bad request - type mismatch | | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation or variable not found | | + +### /datasets + +#### GET +##### Summary + +Resource for getting datasets + +##### Description + +List all datasets + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | +| 401 | Unauthorized - invalid API token | + +#### POST +##### Summary + +Resource for creating datasets + +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/pipeline/file-upload + +#### POST +##### Summary + +Upload a file for use in conversations + +##### Description + +Upload a file to a knowledgebase pipeline +Accepts a single file upload via multipart/form-data. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | File uploaded successfully | +| 400 | Bad request - no file or invalid file | +| 401 | Unauthorized - invalid API token | +| 413 | File too large | +| 415 | Unsupported file type | + +### /datasets/tags + +#### DELETE +##### Summary + +Delete a knowledge type tag + +##### Description + +Delete a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagDeletePayload](#tagdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tag deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +#### GET +##### Summary + +Get all knowledge type tags + +##### Description + +Get all knowledge type tags + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tags retrieved successfully | +| 401 | Unauthorized - invalid API token | + +#### PATCH +##### Description + +Update a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagUpdatePayload](#tagupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tag 
updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +#### POST +##### Summary + +Add a knowledge type tag + +##### Description + +Add a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagCreatePayload](#tagcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tag created successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/binding + +#### POST +##### Description + +Bind tags to a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags bound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/unbinding + +#### POST +##### Description + +Unbind tags from a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagUnbindingPayload](#tagunbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags unbound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/{dataset_id} + +#### DELETE +##### Summary + +Deletes a dataset given its ID + +##### Description + +Delete a dataset +Args: + _: ignore + dataset_id (UUID): The ID of the dataset to be deleted. + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + if the dataset was successfully deleted. Omitted in HTTP response. 
+ int: HTTP status code 204 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Dataset deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | +| 409 | Conflict - dataset is in use | + +#### GET +##### Description + +Get a specific dataset by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +#### PATCH +##### Description + +Update an existing dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/document/create-by-file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document 
created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create-by-text + +#### POST +##### Description + +Create a new document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a new document by providing text content. Use /datasets/{dataset_id}/document/create-by-text instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/documents + +#### GET +##### Description + +List all documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Description + +Download selected uploaded documents as a single ZIP archive + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | ZIP archive generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Summary + +Update metadata for multiple documents + +##### Description + +Update metadata for multiple documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[MetadataOperationData](#metadataoperationdata) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/status/{action} + +#### PATCH +##### Summary + +Batch update document status + +##### Description + +Batch update document status +Args: + tenant_id: tenant id + dataset_id: dataset id + action: action to perform (Literal["enable", "disable", "archive", "un_archive"]) + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + int: HTTP status code 200 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + Forbidden: If the user does not have permission. + InvalidActionError: If the action is invalid or cannot be performed. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable', 'disable', 'archive', or 'un_archive' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document status updated successfully | +| 400 | Bad request - invalid action | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/{batch}/indexing-status + +#### GET +##### Description + +Get indexing status for documents in a batch + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | Batch ID | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved 
successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or documents not found | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Summary + +Delete document + +##### Description + +Delete a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Document deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - document is archived | +| 404 | Document not found | + +#### GET +##### Description + +Get a specific document by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document not found | + +#### PATCH +##### Description + +Update an existing document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Download URL generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or upload file not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### GET +##### Description + +List segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentListQuery](#segmentlistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +#### POST +##### Description + +Create segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments created successfully | +| 400 | Bad request - segments data is missing | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Description + +Delete a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | 
+| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Segment deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### GET +##### Description + +Get a specific segment by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Update a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to update | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Description + +List child chunks for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkListQuery](#childchunklistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | 
Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunks retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Create a new child chunk for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Description + +Delete a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | Child chunk ID to delete | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Child chunk deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +#### PATCH +##### Description + +Update a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | +| child_chunk_id | path | Child chunk ID to update | Yes | string 
| +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-text + +#### POST +##### Description + +Update an existing document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by providing text content. Use /datasets/{dataset_id}/documents/{document_id}/update-by-text instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Summary + +Get all metadata for a dataset + +##### Description + +Get all metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +#### POST +##### Summary + +Create metadata for a dataset + +##### Description + +Create metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Metadata created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/built-in + +#### GET +##### Summary + +Get all built-in metadata fields + +##### Description + +Get all built-in metadata fields + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Built-in fields retrieved successfully | +| 401 | Unauthorized - invalid 
API token | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Summary + +Enable or disable built-in metadata field + +##### Description + +Enable or disable built-in metadata field + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Summary + +Delete metadata + +##### Description + +Delete metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Metadata deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +#### PATCH +##### Summary + +Update metadata name + +##### Description + +Update metadata name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +### /datasets/{dataset_id}/pipeline/datasource-plugins + +#### GET +##### Summary + +Resource for getting datasource plugins + +##### Description + +List 
all datasource plugins for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| is_published | query | Whether to get published or draft datasource plugins (true for published, false for draft, default: true) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource plugins retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Resource for running a datasource node + +##### Description + +Run a datasource node for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource node run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/run + +#### POST +##### Summary + +Resource for running a rag pipeline + +##### Description + +Run a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Pipeline run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/retrieve + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/tags + +#### GET +##### Summary + +Get all knowledge type tags + +##### Description + +Get tags bound to a specific dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tags retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /end-users/{end_user_id} + +#### GET +##### Summary + +Get end user detail + +##### Description + +Get an end user by ID +This endpoint is scoped to the current app token's tenant/app to prevent +cross-tenant/app access when an end-user ID is known. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| end_user_id | path | End user ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | End user retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | End user not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in conversations + +##### Description + +Upload a file for use in conversations +Accepts a single file upload via multipart/form-data. 
+ +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - no file or invalid file | | +| 401 | Unauthorized - invalid API token | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /files/{file_id}/preview + +#### GET +##### Summary + +Preview/Download a file that was uploaded via Service API + +##### Description + +Preview or download a file uploaded via Service API +Provides secure file preview/download functionality. +Files can only be accessed if they belong to messages within the requesting app's context. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FilePreviewQuery](#filepreviewquery) | +| file_id | path | UUID of the file to preview | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | File retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - file access denied | +| 404 | File not found | + +### /form/human_input/{form_token} + +#### GET +##### Description + +Get a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +#### POST +##### Description + +Submit a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| form_token | 
path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form submitted successfully | +| 400 | Bad request - invalid submission data | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +### /info + +#### GET +##### Summary + +Get app information + +##### Description + +Get basic application information +Returns basic information about the application including name, description, tags, and mode. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application info retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /messages + +#### GET +##### Summary + +List messages in a conversation + +##### Description + +List messages in a conversation +Retrieves messages with pagination support using first_id. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Messages retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or first message not found | + +### /messages/{message_id}/feedbacks + +#### POST +##### Summary + +Submit feedback for a message + +##### Description + +Submit feedback for a message +Allows users to rate messages as like/dislike and provide optional feedback content. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | + +### /messages/{message_id}/suggested + +#### GET +##### Summary + +Get suggested follow-up questions for a message + +##### Description + +Get suggested follow-up questions for a message +Returns AI-generated follow-up questions based on the message content. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Suggested questions retrieved successfully | +| 400 | Suggested questions feature is disabled | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | +| 500 | Internal server error | + +### /meta + +#### GET +##### Summary + +Get app metadata + +##### Description + +Get application metadata +Returns metadata about the application including configuration and settings. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve application input parameters and configuration +Returns the input form parameters and configuration for the application. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Parameters retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Get application site configuration +Returns the site configuration for the application including theme, icons, and text. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Site configuration retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - site not found or tenant archived | + +### /text-to-audio + +#### POST +##### Summary + +Convert text to audio using text-to-speech + +##### Description + +Convert text to audio using text-to-speech +Converts the provided text to audio using the specified voice. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text successfully converted to audio | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 500 | Internal server error | + +### /workflow/{task_id}/events + +#### GET +##### Description + +Get workflow execution events stream after resume + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Workflow run ID | Yes | string | +| continue_on_pause | query | Whether to keep the stream open across workflow_paused events, specify `"true"` to keep the stream open for `workflow_paused` events. 
| No | string | +| include_state_snapshot | query | Whether to replay from persisted state snapshot, specify `"true"` to include a status snapshot of executed nodes | No | string | +| user | query | End user identifier (query param) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | SSE event stream | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow run not found | + +### /workflows/logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow execution logs +Returns paginated workflow execution logs with filtering options. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowLogQuery](#workflowlogquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | +| 401 | Unauthorized - invalid API token | | + +### /workflows/run + +#### POST +##### Summary + +Execute a workflow + +##### Description + +Execute a workflow +Runs a workflow with the provided inputs and returns the results. +Supports both blocking and streaming response modes. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workflows/run/{workflow_run_id} + +#### GET +##### Summary + +Get a workflow task running detail + +##### Description + +Get workflow run details +Returns detailed information about a specific workflow run. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run details retrieved successfully | [WorkflowRunResponse](#workflowrunresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Workflow run not found | | + +### /workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop a running workflow task + +##### Description + +Stop a running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /workflows/{workflow_id}/run + +#### POST +##### Summary + +Run specific workflow by ID + +##### Description + +Execute a specific workflow by ID +Executes a specific workflow version identified by its ID. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | +| workflow_id | path | Workflow ID to execute | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Summary + +Get available models by model type + +##### Description + +Get available models by model type +Returns a list of available models for the specified model type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | Type of model to retrieve | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Models retrieved successfully | +| 401 | Unauthorized - invalid API token | + +--- +### Models + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | Annotation answer | Yes | +| question | string | Annotation question | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationReplyActionPayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### ChatRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate_name | boolean | Auto generate conversation name | No | +| conversation_id | | Conversation UUID | No | +| files | | | No | +| inputs | object | | Yes | +| query | string | | Yes | +| response_mode | | | No | +| retriever_from | string | | No | +| workflow_id | | Workflow ID for advanced chat | No | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CompletionRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last 
conversation ID for pagination | No | +| limit | integer | Number of conversations to return | No | +| sort_by | string | Sort order for conversations
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariableInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| value | | | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last variable ID for pagination | No | +| limit | integer | Number of variables to return | No | +| variable_name | | Filter variables by name | No | + +#### DataSetTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | +| retrieval_model | | | No | +| summary_index_setting 
| | | No | + +#### DatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| indexing_technique | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| status | | Document status filter | No | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentTextCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | string | | Yes | + +#### DocumentTextUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| name | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | | | No | + +#### FeedbackListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Number of feedbacks per page | No | +| page | integer | Page number | No | + +#### FilePreviewQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| as_attachment | boolean | Download as attachment | No | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | 
string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| inputs | object | | Yes | + +#### JsonValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JsonValue | | | | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + +Metadata Filtering Condition. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### PipelineRunApiEntity + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | +| response_mode | string | | Yes | +| start_node_id | string | | Yes | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | 
+ +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segments | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| status | [ string ] | | No | + +#### SegmentUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | | | No | +| enabled | | | No | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segment | [SegmentUpdateArgs](#segmentupdateargs) | | Yes | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | | Yes | +| target_id | string | | Yes | + +#### TagCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### TagDeletePayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| tag_id | string | | Yes | + +#### TagUnbindingPayload + +Accept the legacy single-tag Service API payload while exposing a normalized tag_ids list internally. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_id | | | No | +| tag_ids | [ string ] | | No | +| target_id | string | | Yes | + +#### TagUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| tag_id | string | | Yes | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | 
+| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | | No | + +#### WorkflowLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | | No | +| created_at__before | | | No | +| created_by_account | | | No | +| created_by_end_user_session_id | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| response_mode | | | No | + +#### WorkflowRunResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| finished_at | | | No | +| id | string | | Yes | +| inputs | | | No | +| outputs | object | | No | +| status | string | | Yes | +| total_steps | | | No | +| total_tokens | | | No | +| workflow_id | string | | Yes | diff --git a/api/openapi/markdown/web-swagger.md b/api/openapi/markdown/web-swagger.md new file mode 100644 index 0000000000..c9b3b31357 --- /dev/null +++ b/api/openapi/markdown/web-swagger.md @@ -0,0 +1,1224 @@ +# Web API +Public APIs for web applications including file uploads, chat interactions, and app management + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## web +Web application API operations + 
+### /audio-to-text + +#### POST +##### Summary + +Convert audio to text + +##### Description + +Convert audio file to text using speech-to-text service. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal Server Error | + +### /chat-messages + +#### POST +##### Description + +Create a chat message for conversational applications. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /completion-messages + +#### POST +##### Description + +Create a completion message for text generation applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /conversations + +#### GET +##### Description + +Retrieve paginated list of conversations for a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last conversation ID for pagination | No | string | +| limit | query | Number of conversations to return (1-100) | No | integer | +| pinned | query | Filter by pinned status | No | string | +| sort_by | query | Sort order | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id} + +#### DELETE +##### Description + +Delete a specific conversation. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/name + +#### POST +##### Description + +Rename a specific conversation with a custom name or auto-generate one. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | +| auto_generate | query | Auto-generate conversation name | No | boolean | +| name | query | New conversation name | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/pin + +#### PATCH +##### Description + +Pin a specific conversation to keep it at the top of the list. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation pinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/unpin + +#### PATCH +##### Description + +Unpin a specific conversation to remove it from the top of the list. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation unpinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /email-code-login + +#### POST +##### Description + +Send email verification code for login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginSendPayload](#emailcodeloginsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | + +### /email-code-login/validity + +#### POST +##### Description + +Verify email code and complete login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginVerifyPayload](#emailcodeloginverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code verified and login successful | +| 400 | Bad request - invalid code or token | +| 401 | Invalid token or expired code | +| 404 | Account not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in web applications + +##### Description + +Upload a file for use in web applications +Accepts file uploads for use within web applications, supporting +multiple file types with automatic validation and storage. 
+ +Args: + app_model: The associated application model + end_user: The end user uploading the file + +Form Parameters: + file: The file to upload (required) + source: Optional source type (datasets or None) + +Returns: + dict: File information including ID, URL, and metadata + int: HTTP status code 201 for success + +Raises: + NoFileUploadedError: No file provided in request + TooManyFilesError: Multiple files provided (only one allowed) + FilenameNotExistsError: File has no filename + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - invalid file or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset email sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | +| 429 | Too many requests - rate limit exceeded | + +### /forgot-password/resets + +#### POST +##### Description + +Reset user password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset successfully | +| 400 | Bad request - invalid parameters or password mismatch | +| 401 | Invalid or expired token | +| 404 | Account not found | + +### 
/forgot-password/validity + +#### POST +##### Description + +Verify password reset token validity + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Token is valid | +| 400 | Bad request - invalid token format | +| 401 | Invalid or expired token | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by token + +##### Description + +GET /api/form/human_input/{form_token} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by token + +##### Description + +POST /api/form/human_input/{form_token} + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Description + +Authenticate user for web application access + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | +
+##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Authentication successful | +| 400 | Bad request - invalid email or password format | +| 401 | Authentication failed - email or password mismatch | +| 403 | Account banned or login 
disabled | +| 404 | Account not found | + +### /login/status + +#### GET +##### Description + +Check login status + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Login status | +| 401 | Login status | + +### /logout + +#### POST +##### Description + +Logout user from web application + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Logout successful | + +### /messages + +#### GET +##### Description + +Retrieve paginated list of messages from a conversation in a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| conversation_id | query | Conversation UUID | Yes | string | +| first_id | query | First message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /messages/{message_id}/feedbacks + +#### POST +##### Description + +Submit feedback (like/dislike) for a specific message. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | +| content | query | Feedback content | No | string | +| rating | query | Feedback rating | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/more-like-this + +#### GET +##### Description + +Generate a new completion similar to an existing message (completion apps only). 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageMoreLikeThisQuery](#messagemorelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested follow-up questions after a message (chat apps only). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a chat app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found or Conversation Not Found | +| 500 | Internal Server Error | + +### /meta + +#### GET +##### Summary + +Get app meta + +##### Description + +Retrieve the metadata for a specific app. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve the parameters for a specific app. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /passport + +#### GET +##### Description + +Get authentication passport for web application access + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Passport retrieved successfully | +| 401 | Unauthorized - missing app code or invalid authentication | +| 404 | Application or user not found | + +### /remote-files/upload + +#### POST +##### Summary + +Upload a file from a remote URL + +##### Description + +Upload a file from a remote URL +Downloads a file from the provided remote URL and uploads it +to the platform storage for use in web applications. + +Args: + app_model: The associated application model + end_user: The end user making the request + +JSON Parameters: + url: The remote URL to download the file from (required) + +Returns: + dict: File information including ID, signed URL, and metadata + int: HTTP status code 201 for success + +Raises: + RemoteFileUploadError: Failed to fetch file from remote URL + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Remote file uploaded successfully | [FileWithSignedUrl](#filewithsignedurl) | +| 400 | Bad request - invalid URL or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | +| 500 | Failed to fetch remote file | | + +### /remote-files/{url} + +#### GET +##### Summary + +Get information about a remote file + +##### Description + +Get information about a remote file +Retrieves basic information about a file located at a remote URL, +including content type and content length. 
+ +Args: + app_model: The associated application model + end_user: The end user making the request + url: URL-encoded path to the remote file + +Returns: + dict: Remote file information including type and length + +Raises: + HTTPException: If the remote file cannot be accessed + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Remote file information retrieved successfully | [RemoteFileInfo](#remotefileinfo) | +| 400 | Bad request - invalid URL | | +| 404 | Remote file not found | | +| 500 | Failed to fetch remote file | | + +### /saved-messages + +#### GET +##### Description + +Retrieve paginated list of saved messages for a completion application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +#### POST +##### Description + +Save a specific message for later reference. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | query | Message UUID to save | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message saved successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /saved-messages/{message_id} + +#### DELETE +##### Description + +Remove a message from saved messages. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Message removed successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Retrieve app site information and configuration. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /system-features + +#### GET +##### Summary + +Get system feature flags and configuration + +##### Description + +Get system feature flags and configuration +Returns the current system feature flags and configuration +that control various functionalities across the platform. + +Returns: + dict: System feature configuration object + +This endpoint is akin to the `SystemFeatureApi` endpoint in api/controllers/console/feature.py, +except it is intended for use by the web app, instead of the console dashboard. 
+
+NOTE: This endpoint is unauthenticated by design, as it provides system features
+data required for webapp initialization.
+
+Authentication would create a circular dependency (can't authenticate without webapp loading).
+
+Only non-sensitive configuration data should be returned by this endpoint.
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | System features retrieved successfully |
+| 500 | Internal server error |
+
+### /text-to-audio
+
+#### POST
+##### Summary
+
+Convert text to audio
+
+##### Description
+
+Convert text to audio using text-to-speech service.
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 403 | Forbidden |
+| 500 | Internal Server Error |
+
+### /webapp/access-mode
+
+#### GET
+##### Description
+
+Retrieve the access mode for a web application (public or restricted).
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| appCode | query | Application code | No | string |
+| appId | query | Application ID | No | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 500 | Internal Server Error |
+
+### /webapp/permission
+
+#### GET
+##### Description
+
+Check if user has permission to access a web application.
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| appId | query | Application ID | Yes | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 500 | Internal Server Error |
+
+### /workflows/run
+
+#### POST
+##### Summary
+
+Run workflow
+
+##### Description
+
+Execute a workflow with provided inputs and files.
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 403 | Forbidden |
+| 404 | App Not Found |
+| 500 | Internal Server Error |
+
+### /workflows/tasks/{task_id}/stop
+
+#### POST
+##### Summary
+
+Stop workflow task
+
+##### Description
+
+Stop a running workflow task.
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| task_id | path | Task ID to stop | Yes | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 403 | Forbidden |
+| 404 | Task Not Found |
+| 500 | Internal Server Error |
+
+---
+## default
+Default namespace
+
+### /workflow/{task_id}/events
+
+#### GET
+##### Summary
+
+Get workflow execution events stream after resume
+
+##### Description
+
+GET /api/workflow/{task_id}/events
+
+Returns Server-Sent Events stream.
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### AppAccessModeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| appCode | | Application code | No | +| appId | | Application ID | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Files to be processed | No | +| inputs | object | Input variables for the chat | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query/message | Yes | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Files to be processed | No | +| inputs | object | Input variables for the completion | Yes | +| query | string | Query text for completion | No | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | +| sort_by | string | *Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### EmailCodeLoginSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### 
EmailCodeLoginVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### FileWithSignedUrl + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| size | integer | | Yes | +| url | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| password | string | | Yes | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- 
| +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MessageMoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | Yes | + +#### RemoteFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_length | integer | | Yes | +| file_type | string | | Yes | + +#### RemoteFileUploadPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| url | string (uri) | Remote file URL | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### SavedMessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py index 1b97746dea..0900dfda97 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py @@ -3,6 +3,7 @@ from collections.abc import Mapping from typing import Any, cast from unittest.mock import MagicMock +import pytest from dify_trace_aliyun.entities.semconv import ( GEN_AI_FRAMEWORK, GEN_AI_SESSION_ID, @@ -31,7 +32,7 @@ from graphon.enums import WorkflowNodeExecutionStatus from models import EndUser -def test_get_user_id_from_message_data_no_end_user(monkeypatch): +def 
test_get_user_id_from_message_data_no_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = None @@ -39,7 +40,7 @@ def test_get_user_id_from_message_data_no_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "account_id" -def test_get_user_id_from_message_data_with_end_user(monkeypatch): +def test_get_user_id_from_message_data_with_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -57,7 +58,7 @@ def test_get_user_id_from_message_data_with_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "session_id" -def test_get_user_id_from_message_data_end_user_not_found(monkeypatch): +def test_get_user_id_from_message_data_end_user_not_found(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -111,7 +112,7 @@ def test_get_workflow_node_status(): assert status.status_code == StatusCode.UNSET -def test_create_links_from_trace_id(monkeypatch): +def test_create_links_from_trace_id(monkeypatch: pytest.MonkeyPatch): # Mock create_link mock_link = MagicMock(spec=Link) import dify_trace_aliyun.data_exporter.traceclient diff --git a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py index 952f10c34f..95e27c791f 100644 --- a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py +++ b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py @@ -40,7 +40,7 @@ def langfuse_config(): @pytest.fixture -def trace_instance(langfuse_config, monkeypatch): +def trace_instance(langfuse_config, monkeypatch: pytest.MonkeyPatch): # Mock Langfuse client 
to avoid network calls mock_client = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", lambda **kwargs: mock_client) @@ -49,7 +49,7 @@ def trace_instance(langfuse_config, monkeypatch): return instance -def test_init(langfuse_config, monkeypatch): +def test_init(langfuse_config, monkeypatch: pytest.MonkeyPatch): mock_langfuse = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", mock_langfuse) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -64,7 +64,7 @@ def test_init(langfuse_config, monkeypatch): assert instance.file_base_url == "http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -114,7 +114,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info trace_info = WorkflowTraceInfo( workflow_id="wf-1", @@ -218,7 +218,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert other_span.level == LevelEnum.ERROR -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -259,7 +259,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): assert trace_data.name == TraceTaskName.WORKFLOW_TRACE -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -287,7 +287,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): 
trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -331,7 +331,7 @@ def test_message_trace_basic(trace_instance, monkeypatch): assert gen_data.usage.total == 30 -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -636,7 +636,7 @@ def test_langfuse_trace_entity_with_list_dict_input(): assert data.input[0]["content"] == "hello" -def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch, caplog): +def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): # Setup trace info to trigger LLM node usage extraction trace_info = WorkflowTraceInfo( workflow_id="wf-1", diff --git a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py index 45e5894e4a..ee59acb17e 100644 --- a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py +++ b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py @@ -35,7 +35,7 @@ def langsmith_config(): @pytest.fixture -def trace_instance(langsmith_config, monkeypatch): +def trace_instance(langsmith_config, monkeypatch: pytest.MonkeyPatch): # Mock LangSmith client mock_client = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", lambda **kwargs: mock_client) @@ -44,7 +44,7 @@ def trace_instance(langsmith_config, monkeypatch): return instance -def test_init(langsmith_config, monkeypatch): +def test_init(langsmith_config, 
monkeypatch: pytest.MonkeyPatch): mock_client_class = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", mock_client_class) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -57,7 +57,7 @@ def test_init(langsmith_config, monkeypatch): assert instance.file_base_url == "http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -107,7 +107,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace(trace_instance, monkeypatch): +def test_workflow_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info workflow_data = MagicMock() workflow_data.created_at = _dt() @@ -223,7 +223,7 @@ def test_workflow_trace(trace_instance, monkeypatch): assert call_args[4].run_type == LangSmithRunType.retriever -def test_workflow_trace_no_start_time(trace_instance, monkeypatch): +def test_workflow_trace_no_start_time(trace_instance, monkeypatch: pytest.MonkeyPatch): workflow_data = MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) @@ -266,7 +266,7 @@ def test_workflow_trace_no_start_time(trace_instance, monkeypatch): assert trace_instance.add_run.called -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = MagicMock(spec=WorkflowTraceInfo) trace_info.trace_id = "trace-1" trace_info.message_id = None @@ -290,7 +290,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace(trace_instance, monkeypatch): +def test_message_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ 
-516,7 +516,7 @@ def test_update_run_error(trace_instance): trace_instance.update_run(update_data) -def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): workflow_data = MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) diff --git a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py index 46c9750a5d..324f894b25 100644 --- a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py +++ b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py @@ -614,7 +614,7 @@ class TestMessageTrace: span.set_status.assert_called_once() span.add_event.assert_called_once() - def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch: pytest.MonkeyPatch): span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" diff --git a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py index eefed3c78c..5daaa7132c 100644 --- a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py +++ b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py @@ -35,7 +35,7 @@ def opik_config(): @pytest.fixture -def trace_instance(opik_config, monkeypatch): +def trace_instance(opik_config, monkeypatch: pytest.MonkeyPatch): mock_client = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", lambda **kwargs: mock_client) @@ -65,7 +65,7 @@ def test_prepare_opik_uuid(): assert result is not None -def 
test_init(opik_config, monkeypatch): +def test_init(opik_config, monkeypatch: pytest.MonkeyPatch): mock_opik = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", mock_opik) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -82,7 +82,7 @@ def test_init(opik_config, monkeypatch): assert instance.project == opik_config.project -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -132,7 +132,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "fb05c7cd-6cec-4add-8a84-df03a408b4ce" WORKFLOW_RUN_ID = "33c67568-7a8a-450e-8916-a5f135baeaef" @@ -221,7 +221,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert trace_instance.add_span.call_count >= 1 -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "f0708b36-b1d7-42b3-a876-1d01b7d8f1a3" WORKFLOW_RUN_ID = "d42ec285-c2fd-4248-8866-5c9386b101ac" @@ -265,7 +265,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): trace_instance.add_trace.assert_called_once() -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="5745f1b8-f8e6-4859-8110-996acb6c8d6a", tenant_id="tenant-1", @@ -293,7 +293,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, 
monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability MESSAGE_DATA_ID = "e3a26712-8cac-4a25-94a4-a3bff21ee3ab" CONVERSATION_ID = "9d3f3751-7521-4c19-9307-20e3cf6789a3" @@ -340,7 +340,7 @@ def test_message_trace_basic(trace_instance, monkeypatch): trace_instance.add_span.assert_called_once() -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "85411059-79fb-4deb-a76c-c2e215f1b97e" message_data.from_account_id = "acc-1" @@ -614,7 +614,7 @@ def test_get_project_url_error(trace_instance): trace_instance.get_project_url() -def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): trace_info = WorkflowTraceInfo( workflow_id="86a52565-4a6b-4a1b-9bfd-98e4595e70de", tenant_id="66e8e918-472e-4b69-8051-12502c34fc07", diff --git a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py index 6028d0c550..30646815d8 100644 --- a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py +++ b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py @@ -267,14 +267,14 @@ class TestInit: with pytest.raises(ValueError, match="Weave login failed"): WeaveDataTrace(config) - def test_init_files_url_from_env(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_from_env(self, mock_wandb, mock_weave, monkeypatch: pytest.MonkeyPatch): """Test FILES_URL is read from environment.""" monkeypatch.setenv("FILES_URL", "http://files.example.com") config = _make_weave_config() instance = WeaveDataTrace(config) assert instance.file_base_url == 
"http://files.example.com" - def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch: pytest.MonkeyPatch): """Test FILES_URL defaults to http://127.0.0.1:5001.""" monkeypatch.delenv("FILES_URL", raising=False) config = _make_weave_config() @@ -302,7 +302,7 @@ class TestGetProjectUrl: url = instance.get_project_url() assert url == "https://wandb.ai/my-project" - def test_get_project_url_exception_raises(self, trace_instance, monkeypatch): + def test_get_project_url_exception_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Raises ValueError when exception occurs in get_project_url.""" monkeypatch.setattr(trace_instance, "entity", None) monkeypatch.setattr(trace_instance, "project_name", None) @@ -583,7 +583,7 @@ class TestFinishCall: class TestWorkflowTrace: - def _setup_repo(self, monkeypatch, nodes=None): + def _setup_repo(self, monkeypatch: pytest.MonkeyPatch, nodes=None): """Helper to patch session/repo dependencies.""" if nodes is None: nodes = [] @@ -599,7 +599,7 @@ class TestWorkflowTrace: monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) return repo - def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with no nodes and no message_id.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -614,7 +614,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 assert trace_instance.finish_call.call_count == 1 - def test_workflow_trace_with_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_with_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with message_id creates both message and workflow runs.""" 
self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -629,7 +629,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_workflow_trace_with_node_execution(self, trace_instance, monkeypatch): + def test_workflow_trace_with_node_execution(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace iterates node executions and creates node runs.""" node = _make_node( id="node-1", @@ -652,7 +652,7 @@ class TestWorkflowTrace: # workflow run + node run = 2 calls assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch): + def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """LLM node uses process_data prompts as inputs.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -680,7 +680,7 @@ class TestWorkflowTrace: # The key "messages" should be present (validator transforms the list) assert "messages" in node_run.inputs - def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch): + def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Non-LLM node uses node_execution.inputs directly.""" node = _make_node( node_type=BuiltinNodeTypes.TOOL, @@ -701,7 +701,7 @@ class TestWorkflowTrace: node_run = node_call_args[0][0] assert node_run.inputs.get("tool_input") == "val" - def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch): + def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Raises ValueError when app_id is missing from metadata.""" monkeypatch.setattr("dify_trace_weave.weave_trace.sessionmaker", lambda bind: MagicMock()) monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) @@ -714,7 
+714,7 @@ class TestWorkflowTrace: with pytest.raises(ValueError, match="No app_id found in trace_info metadata"): trace_instance.workflow_trace(trace_info) - def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, monkeypatch): + def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """start_time defaults to datetime.now() when None.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -727,7 +727,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 - def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch): + def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Node with created_at=None uses datetime.now().""" node = _make_node(created_at=None, elapsed_time=0.5) self._setup_repo(monkeypatch, nodes=[node]) @@ -740,7 +740,7 @@ class TestWorkflowTrace: trace_instance.workflow_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch): + def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Chat mode LLM node adds ls_provider and ls_model_name to attributes.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -765,7 +765,7 @@ class TestWorkflowTrace: assert node_run.attributes.get("ls_provider") == "openai" assert node_run.attributes.get("ls_model_name") == "gpt-4" - def test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch): + def test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Nodes are sorted by created_at before processing.""" node1 = _make_node(id="node-b", created_at=_dt() + timedelta(seconds=2)) node2 = _make_node(id="node-a", created_at=_dt()) 
@@ -799,7 +799,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) trace_instance.start_call.assert_not_called() - def test_basic_message_trace(self, trace_instance, monkeypatch): + def test_basic_message_trace(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace creates message run and llm child run.""" monkeypatch.setattr( "dify_trace_weave.weave_trace.db.session.get", @@ -816,7 +816,7 @@ class TestMessageTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_message_trace_with_file_data(self, trace_instance, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace appends file URL to file_list.""" file_data = MagicMock() file_data.url = "path/to/file.png" @@ -839,7 +839,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert "http://files.test/path/to/file.png" in message_run.file_list - def test_message_trace_with_end_user(self, trace_instance, monkeypatch): + def test_message_trace_with_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace looks up end user and sets end_user_id attribute.""" end_user = MagicMock() end_user.session_id = "session-xyz" @@ -862,7 +862,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert message_run.attributes.get("end_user_id") == "session-xyz" - def test_message_trace_no_end_user(self, trace_instance, monkeypatch): + def test_message_trace_no_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles when from_end_user_id is None.""" mock_db = MagicMock() mock_db.session.get.return_value = None @@ -880,7 +880,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, 
monkeypatch): + def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """trace_id falls back to message_id when trace_id is None.""" mock_db = MagicMock() mock_db.session.get.return_value = None @@ -895,7 +895,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert message_run.id == "msg-1" - def test_message_trace_file_list_none(self, trace_instance, monkeypatch): + def test_message_trace_file_list_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles file_list=None gracefully.""" mock_db = MagicMock() mock_db.session.get.return_value = None diff --git a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py index a907f918c3..37b2331f0f 100644 --- a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py +++ b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py @@ -20,7 +20,7 @@ def test_validate_distance_function_rejects_unsupported_values(): factory._validate_distance_function("dot_product") -def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch): +def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( id="dataset-1", @@ -45,7 +45,7 @@ def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch assert vector_cls.call_args.kwargs["collection_name"] == "existing_collection" -def test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch): +def test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( 
id="dataset-2", diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py index d1d471761d..2e8052b7dc 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py @@ -83,7 +83,7 @@ def test_get_type_is_analyticdb(): assert vector.get_type() == "analyticdb" -def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): +def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) @@ -109,7 +109,7 @@ def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): assert dataset.index_struct is not None -def test_factory_builds_sql_config_when_host_is_present(monkeypatch): +def test_factory_builds_sql_config_when_host_is_present(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace( id="dataset-2", index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py index d2d735ae3e..26bd385333 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py @@ -24,7 +24,7 @@ def _request_class(name: str): return _Request -def _install_openapi_stubs(monkeypatch): +def _install_openapi_stubs(monkeypatch: pytest.MonkeyPatch): gpdb_package = types.ModuleType("alibabacloud_gpdb20160503") gpdb_package.__path__ = [] gpdb_models = types.ModuleType("alibabacloud_gpdb20160503.models") @@ -130,7 +130,7 @@ def 
test_openapi_config_to_client_params(): assert params["read_timeout"] == 60000 -def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): +def test_init_creates_openapi_client_and_runs_initialize(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) initialize_mock = MagicMock() monkeypatch.setattr(openapi_module.AnalyticdbVectorOpenAPI, "_initialize", initialize_mock) @@ -145,7 +145,7 @@ def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): initialize_mock.assert_called_once_with() -def test_initialize_skips_when_cached(monkeypatch): +def test_initialize_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -164,7 +164,7 @@ def test_initialize_skips_when_cached(monkeypatch): vector._create_namespace_if_not_exists.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -184,7 +184,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_initialize_vector_database_calls_openapi_client(monkeypatch): +def test_initialize_vector_database_calls_openapi_client(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -199,7 +199,7 @@ def test_initialize_vector_database_calls_openapi_client(monkeypatch): assert request.manager_account_password == "password" -def test_create_namespace_creates_when_namespace_not_found(monkeypatch): +def test_create_namespace_creates_when_namespace_not_found(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ 
-211,7 +211,7 @@ def test_create_namespace_creates_when_namespace_not_found(monkeypatch): vector._client.create_namespace.assert_called_once() -def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): +def test_create_namespace_raises_on_unexpected_api_error(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -222,7 +222,7 @@ def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): vector._create_namespace_if_not_exists() -def test_create_namespace_noop_when_namespace_exists(monkeypatch): +def test_create_namespace_noop_when_namespace_exists(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -234,7 +234,7 @@ def test_create_namespace_noop_when_namespace_exists(monkeypatch): vector._client.create_namespace.assert_not_called() -def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): +def test_create_collection_if_not_exists_creates_when_missing(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -255,7 +255,7 @@ def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): +def test_create_collection_if_not_exists_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -274,7 +274,7 @@ def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): vector._client.create_collection.assert_not_called() -def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): +def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch: 
pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -293,7 +293,7 @@ def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): vector.create_collection_if_not_exists(embedding_dimension=512) -def test_openapi_add_delete_and_search_methods(monkeypatch): +def test_openapi_add_delete_and_search_methods(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -348,7 +348,7 @@ def test_openapi_add_delete_and_search_methods(monkeypatch): assert docs_by_text[0].page_content == "high" -def test_text_exists_returns_false_when_matches_empty(monkeypatch): +def test_text_exists_returns_false_when_matches_empty(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -361,7 +361,7 @@ def test_text_exists_returns_false_when_matches_empty(monkeypatch): assert vector.text_exists("missing-id") is False -def test_openapi_delete_success(monkeypatch): +def test_openapi_delete_success(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -372,7 +372,7 @@ def test_openapi_delete_success(monkeypatch): vector._client.delete_collection.assert_called_once() -def test_openapi_delete_propagates_errors(monkeypatch): +def test_openapi_delete_propagates_errors(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py index 49a2ae72d0..cd255b37cf 100644 
--- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py @@ -53,7 +53,7 @@ def test_sql_config_rejects_min_connection_greater_than_max_connection(): AnalyticdbVectorBySqlConfig.model_validate(values) -def test_initialize_skips_when_cache_exists(monkeypatch): +def test_initialize_skips_when_cache_exists(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -70,7 +70,7 @@ def test_initialize_skips_when_cache_exists(monkeypatch): vector._initialize_vector_database.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -88,7 +88,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): sql_module.redis_client.set.assert_called_once() -def test_create_connection_pool_uses_psycopg2_pool(monkeypatch): +def test_create_connection_pool_uses_psycopg2_pool(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -119,7 +119,7 @@ def test_get_cursor_context_manager_handles_connection_lifecycle(): pool.putconn.assert_called_once_with(connection) -def test_add_texts_inserts_only_documents_with_metadata(monkeypatch): +def test_add_texts_inserts_only_documents_with_metadata(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.table_name = "dify.collection" @@ -273,7 +273,7 @@ def test_delete_drops_table(): cursor.execute.assert_called_once() -def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch): +def 
test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch: pytest.MonkeyPatch): config = AnalyticdbVectorBySqlConfig(**_config_values()) created_pool = MagicMock() @@ -288,7 +288,7 @@ def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypat assert vector.pool is created_pool -def test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch): +def test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -326,7 +326,7 @@ def test_initialize_vector_database_handles_existing_database_and_search_config( assert any("CREATE SCHEMA IF NOT EXISTS dify" in call.args[0] for call in worker_cursor.execute.call_args_list) -def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch): +def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -353,7 +353,7 @@ def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(mon worker_connection.rollback.assert_called_once() -def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch): +def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" @@ -381,7 +381,7 @@ def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeyp sql_module.redis_client.set.assert_called_once() -def 
test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch): +def test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" diff --git a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py index 851c09f47a..f0dddee3b9 100644 --- a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py +++ b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py @@ -121,7 +121,7 @@ def _build_fake_pymochow_modules(): @pytest.fixture -def baidu_module(monkeypatch): +def baidu_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymochow_modules().items(): monkeypatch.setitem(sys.modules, name, module) import dify_vdb_baidu.baidu_vector as module @@ -254,7 +254,7 @@ def test_search_methods_delegate_to_database_table(baidu_module): assert vector._get_search_res.call_count == 2 -def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch): +def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch: pytest.MonkeyPatch): factory = baidu_module.BaiduVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) monkeypatch.setattr(baidu_module.Dataset, "gen_collection_name_by_id", lambda _id: "AUTO_COLLECTION") @@ -279,7 +279,7 @@ def test_factory_initializes_collection_name_and_index_struct(baidu_module, monk assert dataset.index_struct is not None -def test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch): +def test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch: pytest.MonkeyPatch): init_client = MagicMock(return_value="client") init_database = MagicMock(return_value="database") 
monkeypatch.setattr(baidu_module.BaiduVector, "_init_client", init_client) @@ -372,7 +372,7 @@ def test_get_search_result_handles_invalid_metadata_json(baidu_module): assert "document_id" not in docs[0].metadata -def test_init_client_constructs_configuration_and_client(baidu_module, monkeypatch): +def test_init_client_constructs_configuration_and_client(baidu_module, monkeypatch: pytest.MonkeyPatch): credentials = MagicMock(return_value="credentials") configuration = MagicMock(return_value="configuration") client_cls = MagicMock(return_value="client") @@ -411,7 +411,7 @@ def test_init_database_raises_for_unknown_create_database_error(baidu_module): vector._init_database() -def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch): +def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = SimpleNamespace( @@ -460,7 +460,7 @@ def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypat vector._wait_for_index_ready.assert_called_once_with(table, 3600) -def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch): +def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._db = MagicMock() @@ -493,7 +493,7 @@ def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypat vector._create_table(3) -def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch): +def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = 
SimpleNamespace( @@ -524,7 +524,9 @@ def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, vector._create_table(3) -def test_factory_uses_existing_collection_prefix_when_index_struct_exists(baidu_module, monkeypatch): +def test_factory_uses_existing_collection_prefix_when_index_struct_exists( + baidu_module, monkeypatch: pytest.MonkeyPatch +): factory = baidu_module.BaiduVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py index b209c9df96..f18f9a6561 100644 --- a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py +++ b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py @@ -44,7 +44,7 @@ def _build_fake_chroma_modules(): @pytest.fixture -def chroma_module(monkeypatch): +def chroma_module(monkeypatch: pytest.MonkeyPatch): fake_chroma = _build_fake_chroma_modules() monkeypatch.setitem(sys.modules, "chromadb", fake_chroma) import dify_vdb_chroma.chroma_vector as module @@ -73,7 +73,7 @@ def test_chroma_config_to_params_builds_expected_payload(chroma_module): assert params["settings"].chroma_client_auth_credentials == "credentials" -def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch): +def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -173,7 +173,7 @@ def test_search_by_full_text_returns_empty_list(chroma_module): assert vector.search_by_full_text("query") == [] -def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch): +def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch: pytest.MonkeyPatch): factory = chroma_module.ChromaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", 
index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py index a7473f1b91..4f8395e475 100644 --- a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py +++ b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py @@ -45,7 +45,7 @@ def _build_fake_clickzetta_module(): @pytest.fixture -def clickzetta_module(monkeypatch): +def clickzetta_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "clickzetta", _build_fake_clickzetta_module()) import dify_vdb_clickzetta.clickzetta_vector as module @@ -218,7 +218,7 @@ def test_search_by_like_returns_documents_with_default_score(clickzetta_module): assert docs[0].metadata["score"] == 0.5 -def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): +def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch: pytest.MonkeyPatch): factory = clickzetta_module.ClickzettaVectorFactory() dataset = SimpleNamespace(id="dataset-1") @@ -243,7 +243,7 @@ def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): assert vector_cls.call_args.kwargs["collection_name"] == "collection" -def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch): +def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch: pytest.MonkeyPatch): clickzetta_module.ClickzettaConnectionPool._instance = None monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) @@ -255,7 +255,7 @@ def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch assert "username:instance:service:workspace:cluster:dify" in key -def test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch): +def 
test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -274,7 +274,7 @@ def test_connection_pool_create_connection_retries_and_configures(clickzetta_mod pool._configure_connection.assert_called_once_with(connection) -def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch): +def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -318,7 +318,7 @@ def test_connection_pool_configure_connection_swallows_errors(clickzetta_module) monkeypatch.undo() -def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch): +def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -360,7 +360,7 @@ def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monk assert pool._shutdown is True -def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch): +def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False pool._cleanup_expired_connections = MagicMock(side_effect=lambda: setattr(pool, "_shutdown", True)) @@ -384,7 +384,7 @@ def 
test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module pool._cleanup_expired_connections.assert_called_once() -def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch): +def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() pool.get_connection.return_value = "conn" monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "get_instance", MagicMock(return_value=pool)) @@ -405,7 +405,7 @@ def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypat assert vector._ensure_connection() == "conn" -def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch): +def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch: pytest.MonkeyPatch): class _Thread: def __init__(self, target, daemon): self.target = target @@ -579,7 +579,7 @@ def test_create_inverted_index_branches(clickzetta_module): vector._create_inverted_index(cursor) -def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch): +def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch: pytest.MonkeyPatch): vector = clickzetta_module.ClickzettaVector.__new__(clickzetta_module.ClickzettaVector) vector._config = _config(clickzetta_module) vector._config.batch_size = 2 @@ -811,7 +811,7 @@ def test_clickzetta_pool_cleanup_and_shutdown_edge_paths(clickzetta_module): assert pool._shutdown is True -def test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch): +def test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False diff --git a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py 
b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py index 7e5c40b8f2..d474b566d3 100644 --- a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py +++ b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py @@ -150,7 +150,7 @@ def _build_fake_couchbase_modules(): @pytest.fixture -def couchbase_module(monkeypatch): +def couchbase_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_couchbase_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -194,7 +194,7 @@ def test_init_sets_cluster_handles(couchbase_module): vector._cluster.wait_until_ready.assert_called_once() -def test_create_and_create_collection_branches(couchbase_module, monkeypatch): +def test_create_and_create_collection_branches(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector.__new__(couchbase_module.CouchbaseVector) vector._collection_name = "collection_1" vector._client_config = _config(couchbase_module) @@ -319,7 +319,7 @@ def test_search_methods_and_format_metadata(couchbase_module): assert vector._format_metadata({"metadata.a": 1, "plain": 2}) == {"a": 1, "plain": 2} -def test_delete_collection_and_factory(couchbase_module, monkeypatch): +def test_delete_collection_and_factory(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector("collection_1", _config(couchbase_module)) scopes = [ SimpleNamespace(collections=[SimpleNamespace(name="other")]), diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py index f81ed6beea..91cc2e0fdb 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py @@ -28,7 +28,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture 
-def elasticsearch_ja_module(monkeypatch): +def elasticsearch_ja_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -39,7 +39,7 @@ def elasticsearch_ja_module(monkeypatch): return importlib.reload(ja_module) -def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): +def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -57,7 +57,7 @@ def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): elasticsearch_ja_module.redis_client.set.assert_not_called() -def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch): +def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -87,7 +87,7 @@ def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monk elasticsearch_ja_module.redis_client.set.assert_called_once() -def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch): +def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_ja_module.ElasticSearchJaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py index 48f1f6dc26..d54c105a0f 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py @@ -38,7 +38,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def 
elasticsearch_module(monkeypatch): +def elasticsearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -287,7 +287,7 @@ def test_search_by_vector_and_full_text(elasticsearch_module): assert "bool" in query -def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): +def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): elasticsearch_module.redis_client.set.assert_called_once() -def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch): +def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_module.ElasticSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py index f9a557ecce..8b197662e3 100644 --- a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py +++ b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py @@ -38,7 +38,7 @@ def _build_fake_hologres_modules(): @pytest.fixture -def hologres_module(monkeypatch): +def hologres_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_hologres_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -266,7 +266,7 @@ def test_delete_handles_existing_and_missing_tables(hologres_module): vector._client.drop_table.assert_called_once_with(vector.table_name) -def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch): +def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch: 
pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -281,7 +281,7 @@ def test_create_collection_returns_early_when_cache_hits(hologres_module, monkey hologres_module.redis_client.set.assert_not_called() -def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch): +def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -313,7 +313,7 @@ def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatc hologres_module.redis_client.set.assert_called_once() -def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch): +def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -331,7 +331,7 @@ def test_create_collection_raises_when_table_never_becomes_ready(hologres_module hologres_module.redis_client.set.assert_not_called() -def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch): +def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch: pytest.MonkeyPatch): factory = hologres_module.HologresVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py index ba3f14912b..a1617b6d43 100644 --- a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py +++ b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py @@ -29,7 +29,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def huawei_module(monkeypatch): +def huawei_module(monkeypatch: 
pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -155,7 +155,7 @@ def test_search_by_vector_and_full_text(huawei_module): assert docs[0].page_content == "text-hit" -def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch): +def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch: pytest.MonkeyPatch): class FakeDocument: def __init__(self, page_content, vector, metadata): self.page_content = page_content @@ -185,7 +185,7 @@ def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch assert docs == [] -def test_create_and_create_collection_paths(huawei_module, monkeypatch): +def test_create_and_create_collection_paths(huawei_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -218,7 +218,7 @@ def test_create_and_create_collection_paths(huawei_module, monkeypatch): huawei_module.redis_client.set.assert_called_once() -def test_huawei_factory_branches(huawei_module, monkeypatch): +def test_huawei_factory_branches(huawei_module, monkeypatch: pytest.MonkeyPatch): factory = huawei_module.HuaweiCloudVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py index 8c038e82b9..b4ea6ea6c1 100644 --- a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py +++ b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py @@ -23,7 +23,7 @@ def _build_fake_iris_module(): @pytest.fixture -def iris_module(monkeypatch): +def iris_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "iris", _build_fake_iris_module()) import dify_vdb_iris.iris_vector as module @@ -249,7 +249,7 @@ def test_iris_vector_init_get_cursor_and_create(iris_module): 
vector._create_collection.assert_called_once_with(2) -def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): +def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module)) @@ -297,7 +297,7 @@ def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): assert docs[0].metadata["score"] == pytest.approx(0.9) -def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): +def test_iris_vector_full_text_search_paths(iris_module, monkeypatch: pytest.MonkeyPatch): cfg = _config(iris_module, IRIS_TEXT_INDEX=True) with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", cfg) @@ -344,7 +344,7 @@ def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): assert vector_like.search_by_full_text("100%", top_k=1) == [] -def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch): +def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module, IRIS_TEXT_INDEX=True)) diff --git a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py index 238145c1d6..4a408d1b10 100644 --- a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py +++ b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py @@ -47,7 +47,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def lindorm_module(monkeypatch): +def lindorm_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -100,7 +100,7 @@ def 
test_to_opensearch_params_and_init(lindorm_module): assert vector_ugc._routing == "route" -def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch): +def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore( "collection", _config(lindorm_module), using_ugc=True, routing_value="route" ) @@ -301,7 +301,7 @@ def test_search_by_full_text_success_and_error(lindorm_module): vector.search_by_full_text("hello") -def test_create_collection_paths(lindorm_module, monkeypatch): +def test_create_collection_paths(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore("collection", _config(lindorm_module), using_ugc=False) with pytest.raises(ValueError, match="cannot be empty"): @@ -331,7 +331,7 @@ def test_create_collection_paths(lindorm_module, monkeypatch): vector._client.indices.create.assert_not_called() -def test_lindorm_factory_branches(lindorm_module, monkeypatch): +def test_lindorm_factory_branches(lindorm_module, monkeypatch: pytest.MonkeyPatch): factory = lindorm_module.LindormVectorStoreFactory() monkeypatch.setattr(lindorm_module.dify_config, "LINDORM_URL", "http://localhost:9200") diff --git a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py index c22f4304e5..762ec330b2 100644 --- a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py +++ b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py @@ -32,7 +32,7 @@ def _build_fake_mo_vector_modules(): @pytest.fixture -def matrixone_module(monkeypatch): +def matrixone_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_mo_vector_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -70,7 +70,7 @@ def test_matrixone_config_validation(matrixone_module, field, value, message): 
matrixone_module.MatrixoneConfig.model_validate(values) -def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch): +def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -86,7 +86,7 @@ def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, matrixone_module.redis_client.set.assert_called_once() -def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch): +def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -146,7 +146,7 @@ def test_get_type_and_create_delegate_to_add_texts(matrixone_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch): +def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -165,7 +165,7 @@ def test_get_client_handles_full_text_index_creation_error(matrixone_module, mon matrixone_module.redis_client.set.assert_not_called() -def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch): +def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch: pytest.MonkeyPatch): vector = matrixone_module.MatrixoneVector("collection_1", _valid_config(matrixone_module)) vector.client = MagicMock() monkeypatch.setattr(matrixone_module.uuid, "uuid4", lambda: "generated-uuid") @@ -224,7 +224,7 @@ def test_search_by_vector_builds_documents(matrixone_module): assert vector.client.query.call_args.kwargs["filter"] == {"document_id": {"$in": ["d-1"]}} -def 
test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch): +def test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch: pytest.MonkeyPatch): factory = matrixone_module.MatrixoneVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py index 36c0ed8f6f..730ff9f296 100644 --- a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py +++ b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py @@ -99,7 +99,7 @@ def _build_fake_pymilvus_modules(): @pytest.fixture -def milvus_module(monkeypatch): +def milvus_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymilvus_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -327,7 +327,7 @@ def test_process_search_results_and_search_methods(milvus_module): assert "document_id" in vector._client.search.call_args.kwargs["filter"] -def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch): +def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -351,7 +351,7 @@ def test_create_collection_cache_and_existing_collection(milvus_module, monkeypa milvus_module.redis_client.set.assert_called() -def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch): +def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -385,7 +385,7 @@ def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch) assert call_kwargs["consistency_level"] == "Session" -def test_factory_initializes_milvus_vector(milvus_module, monkeypatch): +def 
test_factory_initializes_milvus_vector(milvus_module, monkeypatch: pytest.MonkeyPatch): factory = milvus_module.MilvusVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py index 228ea92639..900c75fdab 100644 --- a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py +++ b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py @@ -38,7 +38,7 @@ def _build_fake_clickhouse_connect_module(): @pytest.fixture -def myscale_module(monkeypatch): +def myscale_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_clickhouse_connect_module() monkeypatch.setitem(sys.modules, "clickhouse_connect", fake_module) @@ -90,7 +90,7 @@ def test_delete_by_ids_short_circuits_on_empty_list(myscale_module): vector._client.command.assert_not_called() -def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch): +def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch: pytest.MonkeyPatch): factory = myscale_module.MyScaleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -160,7 +160,7 @@ def test_create_collection_builds_expected_sql(myscale_module): assert "INDEX text_idx text TYPE fts('tokenizer=unicode')" in sql -def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch): +def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch: pytest.MonkeyPatch): vector = myscale_module.MyScaleVector("collection_1", _config(myscale_module)) monkeypatch.setattr(myscale_module.uuid, "uuid4", lambda: "generated-uuid") docs = [ diff --git a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py index 31f9ff3e56..36393cc486 100644 --- 
a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py +++ b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py @@ -53,7 +53,7 @@ def _build_fake_pyobvector_module(): @pytest.fixture -def oceanbase_module(monkeypatch): +def oceanbase_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "pyobvector", _build_fake_pyobvector_module()) import dify_vdb_oceanbase.oceanbase_vector as module @@ -208,7 +208,7 @@ def test_create_delegates_to_collection_and_insert(oceanbase_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch): +def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -234,7 +234,7 @@ def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_mod vector.delete.assert_not_called() -def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch): +def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -271,7 +271,7 @@ def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, mo oceanbase_module.redis_client.set.assert_called_once() -def test_create_collection_error_paths(oceanbase_module, monkeypatch): +def test_create_collection_error_paths(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -308,7 +308,7 @@ def test_create_collection_error_paths(oceanbase_module, monkeypatch): vector._create_collection() -def test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch): +def 
test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -517,7 +517,7 @@ def test_delete_success_and_exception(oceanbase_module): vector.delete() -def test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch): +def test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch: pytest.MonkeyPatch): factory = oceanbase_module.OceanBaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py index 09abd625fc..57c9b14d9f 100644 --- a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py +++ b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def opengauss_module(monkeypatch): +def opengauss_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -88,7 +88,7 @@ def test_opengauss_config_validation_rejects_min_greater_than_max(opengauss_modu opengauss_module.OpenGaussConfig.model_validate(values) -def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): +def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -99,7 +99,7 @@ def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): assert vector.pool is pool -def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): +def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() 
monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -126,7 +126,7 @@ def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch): +def test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -158,7 +158,7 @@ def test_search_by_vector_validates_top_k(opengauss_module): vector.search_by_vector([0.1, 0.2], top_k=0) -def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch): +def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -200,7 +200,7 @@ def test_create_calls_collection_insert_and_index(opengauss_module): vector._create_index.assert_called_once_with(2) -def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch): +def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -220,7 +220,7 @@ def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_not_called() -def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch): +def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, 
"SimpleConnectionPool", MagicMock(return_value=pool)) @@ -245,7 +245,7 @@ def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, m assert any("embedding_cosine_embedding_collection_1_idx" in query for query in sql) -def test_add_texts_uses_execute_values(opengauss_module, monkeypatch): +def test_add_texts_uses_execute_values(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -342,7 +342,7 @@ def test_search_by_full_text_validates_top_k(opengauss_module): vector.search_by_full_text("query", top_k=0) -def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): +def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) lock = MagicMock() @@ -370,7 +370,7 @@ def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch): +def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch: pytest.MonkeyPatch): factory = opengauss_module.OpenGaussFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py index f2ed7cb6fb..b2b004a4de 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py @@ -59,7 +59,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def 
opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -95,7 +95,7 @@ class TestOpenSearchConfig: assert params["connection_class"].__name__ == "Urllib3HttpConnection" assert params["http_auth"] == ("admin", "password") - def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch): + def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch: pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py index 1c2921f85b..80bf20e820 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py @@ -58,7 +58,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -116,7 +116,7 @@ def test_config_validation_for_aws_auth_and_https_fields(opensearch_module): opensearch_module.OpenSearchConfig.model_validate(values) -def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch): +def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch: pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" @@ -167,7 +167,7 @@ def test_init_and_create_delegate_calls(opensearch_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch): +def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch: pytest.MonkeyPatch): vector = 
opensearch_module.OpenSearchVector("Collection_1", _config(opensearch_module, aws_service="es")) docs = [ Document(page_content="a", metadata={"doc_id": "1"}), @@ -308,7 +308,7 @@ def test_search_by_full_text_and_filters(opensearch_module): assert query["query"]["bool"]["filter"] == [{"terms": {"metadata.document_id": ["d-1"]}}] -def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch): +def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch) opensearch_module.redis_client.set.assert_called() -def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch): +def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch: pytest.MonkeyPatch): factory = opensearch_module.OpenSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py index 678cf876b0..46027c7e44 100644 --- a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py +++ b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py @@ -51,7 +51,7 @@ def _connection_with_cursor(cursor): @pytest.fixture -def oracle_module(monkeypatch): +def oracle_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_oracle_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -94,7 +94,7 @@ def test_oracle_config_validation_autonomous_requirements(oracle_module): ) -def test_init_and_get_type(oracle_module, monkeypatch): +def test_init_and_get_type(oracle_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(oracle_module.oracledb, "create_pool", 
MagicMock(return_value=pool)) vector = oracle_module.OracleVector("collection_1", _config(oracle_module)) @@ -139,7 +139,7 @@ def test_numpy_converters_and_type_handlers(oracle_module): assert out_float64.dtype == numpy.float64 -def test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch): +def test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): connect = MagicMock(return_value="connection") monkeypatch.setattr(oracle_module.oracledb, "connect", connect) @@ -173,7 +173,7 @@ def test_create_delegates_collection_and_insert(oracle_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch): +def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector.input_type_handler = MagicMock() @@ -279,7 +279,7 @@ def _fake_nltk_module(*, missing_data=False): return nltk, nltk_corpus -def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch): +def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" @@ -305,7 +305,7 @@ def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatc assert "doc_id_0" in en_params -def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch): +def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector._get_connection = MagicMock() @@ -320,7 +320,7 @@ def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, 
monkeyp vector.search_by_full_text("english query") -def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): +def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -346,7 +346,9 @@ def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): oracle_module.redis_client.set.assert_called_once() -def test_oracle_factory_init_vector_uses_existing_or_generated_collection(oracle_module, monkeypatch): +def test_oracle_factory_init_vector_uses_existing_or_generated_collection( + oracle_module, monkeypatch: pytest.MonkeyPatch +): factory = oracle_module.OracleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py index c3291f7f12..1841e88139 100644 --- a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py +++ b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py @@ -79,7 +79,7 @@ def _patch_both(monkeypatch, module, calls, execute_results=None): @pytest.fixture -def pgvecto_module(monkeypatch): +def pgvecto_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pgvecto_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -126,7 +126,7 @@ def test_collection_base_has_expected_annotations(pgvecto_module): assert {"id", "text", "meta", "vector"} <= set(annotations) -def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): +def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -145,7 +145,7 @@ def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): 
vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): +def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -169,7 +169,7 @@ def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): module.redis_client.set.assert_called() -def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): +def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] runtime_calls = [] @@ -241,7 +241,7 @@ def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): assert any("DROP TABLE IF EXISTS collection_1" in str(args[0]) for args, _ in runtime_calls) -def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): +def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -313,7 +313,7 @@ def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): assert vector.search_by_full_text("hello") == [] -def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch): +def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module factory = module.PGVectoRSFactory() dataset_with_index = SimpleNamespace( diff --git a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py index 99a6e00c16..38e472df63 100644 --- a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py +++ b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py @@ -336,7 +336,7 
@@ def test_create_delegates_collection_creation_and_insert(): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch): +def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name = "embedding_collection_1" @@ -387,7 +387,7 @@ def test_text_get_and_delete_methods(): assert any("DROP TABLE IF EXISTS embedding_collection_1" in sql for sql in executed_sql) -def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch): +def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name = "embedding_collection_1" cursor = MagicMock() @@ -464,7 +464,7 @@ def test_search_by_full_text_branches_for_bigm_and_standard(): assert "bigm_similarity" in cursor.execute.call_args_list[1].args[0] -def test_pgvector_factory_initializes_expected_collection_name(monkeypatch): +def test_pgvector_factory_initializes_expected_collection_name(monkeypatch: pytest.MonkeyPatch): factory = pgvector_module.PGVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py index 0ed5491fbe..89ee0a47f1 100644 --- a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py +++ b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py @@ -121,7 +121,7 @@ def _build_fake_qdrant_modules(): @pytest.fixture -def qdrant_module(monkeypatch): +def qdrant_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_qdrant_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -170,7 +170,7 @@ def test_init_and_basic_behaviour(qdrant_module): vector.add_texts.assert_called_once() -def 
test_create_collection_and_add_texts(qdrant_module, monkeypatch): +def test_create_collection_and_add_texts(qdrant_module, monkeypatch: pytest.MonkeyPatch): vector = qdrant_module.QdrantVector("collection_1", "group-1", _config(qdrant_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -288,7 +288,7 @@ def test_search_and_helper_methods(qdrant_module): assert doc.page_content == "doc" -def test_qdrant_factory_paths(qdrant_module, monkeypatch): +def test_qdrant_factory_paths(qdrant_module, monkeypatch: pytest.MonkeyPatch): factory = qdrant_module.QdrantVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py index f97ad1400a..c5f3a9f847 100644 --- a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py +++ b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py @@ -59,7 +59,7 @@ def _patch_both(monkeypatch, module, session): @pytest.fixture -def relyt_module(monkeypatch): +def relyt_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_relyt_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -97,7 +97,7 @@ def test_relyt_config_validation(relyt_module, field, value, message): relyt_module.RelytConfig.model_validate(values) -def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): +def test_init_get_type_and_create_delegate(relyt_module, monkeypatch: pytest.MonkeyPatch): engine = MagicMock() monkeypatch.setattr(relyt_module, "create_engine", MagicMock(return_value=engine)) vector = relyt_module.RelytVector("collection_1", _config(relyt_module), group_id="group-1") @@ -114,7 +114,7 @@ def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): +def 
test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -142,7 +142,7 @@ def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): relyt_module.redis_client.set.assert_called_once() -def test_add_texts_and_metadata_queries(relyt_module, monkeypatch): +def test_add_texts_and_metadata_queries(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector._group_id = "group-1" @@ -212,7 +212,7 @@ def test_delete_by_metadata_field_calls_delete_by_uuids(relyt_module): # 3. delete_by_ids translates to uuids -def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): +def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -225,7 +225,7 @@ def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): # 4. text_exists True -def test_text_exists_true(relyt_module, monkeypatch): +def test_text_exists_true(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -236,7 +236,7 @@ def test_text_exists_true(relyt_module, monkeypatch): # 5. text_exists False -def test_text_exists_false(relyt_module, monkeypatch): +def test_text_exists_false(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -284,7 +284,7 @@ def test_search_by_vector_filters_by_score_and_ids(relyt_module): # 8. 
delete commits session -def test_delete_drops_table(relyt_module, monkeypatch): +def test_delete_drops_table(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -295,7 +295,7 @@ def test_delete_drops_table(relyt_module, monkeypatch): session.execute.assert_called_once() -def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch): +def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch: pytest.MonkeyPatch): factory = relyt_module.RelytVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py index 62a11e0445..49d4b160cf 100644 --- a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py +++ b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py @@ -77,7 +77,7 @@ def _build_fake_tablestore_module(): @pytest.fixture -def tablestore_module(monkeypatch): +def tablestore_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_tablestore_module() monkeypatch.setitem(sys.modules, "tablestore", fake_module) @@ -177,7 +177,7 @@ def test_get_by_ids_text_exists_delete_and_wrappers(tablestore_module): vector._delete_table_if_exist.assert_called_once() -def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch): +def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch: pytest.MonkeyPatch): vector = tablestore_module.TableStoreVector("collection_1", _config(tablestore_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -289,7 +289,7 @@ def test_write_row_and_search_helpers(tablestore_module): assert "score" not in docs[0].metadata -def 
test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch): +def test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch: pytest.MonkeyPatch): factory = tablestore_module.TableStoreVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py index 299e40ee1e..e1fe227a29 100644 --- a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py +++ b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py @@ -136,7 +136,7 @@ def _build_fake_tencent_modules(): @pytest.fixture -def tencent_module(monkeypatch): +def tencent_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_tencent_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -187,7 +187,7 @@ def test_config_and_init_paths(tencent_module): assert vector._enable_hybrid_search is False -def test_create_collection_branches(tencent_module, monkeypatch): +def test_create_collection_branches(tencent_module, monkeypatch: pytest.MonkeyPatch): vector = tencent_module.TencentVector("collection_1", _config(tencent_module)) lock = MagicMock() @@ -279,7 +279,7 @@ def test_create_add_delete_and_search_behaviour(tencent_module): vector._client.drop_collection.assert_called_once() -def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch): +def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch: pytest.MonkeyPatch): factory = tencent_module.TencentVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py index bdbed2f740..ed03cbee88 100644 --- 
a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py +++ b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py @@ -46,7 +46,7 @@ def test_tidb_config_validation(tidb_module, field, value, message): tidb_module.TiDBVectorConfig.model_validate(values) -def test_init_get_type_and_distance_func(tidb_module, monkeypatch): +def test_init_get_type_and_distance_func(tidb_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(tidb_module, "create_engine", MagicMock(return_value="engine")) vector = tidb_module.TiDBVector("collection_1", _config(tidb_module), distance_func="L2") @@ -63,7 +63,7 @@ def test_init_get_type_and_distance_func(tidb_module, monkeypatch): assert vector._get_distance_func() == "VEC_COSINE_DISTANCE" -def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch): +def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch: pytest.MonkeyPatch): fake_tidb_vector = types.ModuleType("tidb_vector") fake_tidb_sqlalchemy = types.ModuleType("tidb_vector.sqlalchemy") @@ -107,7 +107,7 @@ def test_create_calls_collection_and_add_texts(tidb_module): assert vector._dimension == 2 -def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): +def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -127,7 +127,7 @@ def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): tidb_module.redis_client.set.assert_not_called() -def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch): +def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -160,7 +160,7 @@ def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monke 
tidb_module.redis_client.set.assert_called_once() -def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): +def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch: pytest.MonkeyPatch): class _InsertStmt: def __init__(self, table): self.table = table @@ -198,7 +198,7 @@ def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): @pytest.fixture -def tidb_vector_with_session(tidb_module, monkeypatch): +def tidb_vector_with_session(tidb_module, monkeypatch: pytest.MonkeyPatch): vector = tidb_module.TiDBVector.__new__(tidb_module.TiDBVector) vector._collection_name = "collection_1" vector._engine = MagicMock() @@ -354,7 +354,7 @@ def test_delete_by_metadata_field_does_nothing_when_no_ids(tidb_module): # Test search_by_vector filters and scores -def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch): +def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = [ ('{"doc_id":"id-1","document_id":"d-1"}', "text-1", 0.2), @@ -392,7 +392,7 @@ def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch): # Test delete drops table -def test_delete_drops_table(tidb_module, monkeypatch): +def test_delete_drops_table(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = None @@ -413,7 +413,7 @@ def test_delete_drops_table(tidb_module, monkeypatch): assert "DROP TABLE IF EXISTS collection_1" in drop_sql -def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch): +def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch: pytest.MonkeyPatch): factory = tidb_module.TiDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py index 
a884275c89..55d27ad264 100644 --- a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py +++ b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py @@ -36,7 +36,7 @@ def _build_fake_upstash_module(): @pytest.fixture -def upstash_module(monkeypatch): +def upstash_module(monkeypatch: pytest.MonkeyPatch): # Remove patched modules if present for modname in ["upstash_vector", "dify_vdb_upstash.upstash_vector"]: if modname in sys.modules: @@ -65,7 +65,7 @@ def test_upstash_config_validation(upstash_module, field, value, message): upstash_module.UpstashVectorConfig.model_validate(values) -def test_init_get_type_and_dimension(upstash_module, monkeypatch): +def test_init_get_type_and_dimension(upstash_module, monkeypatch: pytest.MonkeyPatch): vector = upstash_module.UpstashVector("collection_1", _config(upstash_module)) assert vector.get_type() == upstash_module.VectorType.UPSTASH @@ -162,7 +162,7 @@ def test_search_by_vector_filter_threshold_and_delete(upstash_module): vector.index.reset.assert_called_once() -def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch): +def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch: pytest.MonkeyPatch): factory = upstash_module.UpstashVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py index 4dfb956c00..32f47c67ed 100644 --- a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py +++ b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def vastbase_module(monkeypatch): +def vastbase_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -93,7 +93,7 @@ def 
test_vastbase_config_rejects_invalid_connection_window(vastbase_module): ) -def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): +def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(vastbase_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -114,7 +114,7 @@ def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): pool.putconn.assert_called_once_with(conn) -def test_create_and_add_texts(vastbase_module, monkeypatch): +def test_create_and_add_texts(vastbase_module, monkeypatch: pytest.MonkeyPatch): vector = vastbase_module.VastbaseVector.__new__(vastbase_module.VastbaseVector) vector.table_name = "embedding_collection_1" vector._create_collection = MagicMock() @@ -205,7 +205,7 @@ def test_search_by_vector_and_full_text(vastbase_module): assert full_docs[0].page_content == "full-text" -def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch): +def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -240,7 +240,7 @@ def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeyp vastbase_module.redis_client.set.assert_called() -def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch): +def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch: pytest.MonkeyPatch): factory = vastbase_module.VastbaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py index 544b8163be..6559ad97d2 100644 --- a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py +++ 
b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py @@ -79,7 +79,7 @@ def _build_fake_vikingdb_modules(): @pytest.fixture -def vikingdb_module(monkeypatch): +def vikingdb_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_vikingdb_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -117,7 +117,7 @@ def test_init_get_type_and_has_checks(vikingdb_module): assert vector._has_index() is False -def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch): +def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -253,7 +253,7 @@ def test_delete_drops_index_and_collection_when_present(vikingdb_module): vector._client.drop_collection.assert_not_called() -def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch): +def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch: pytest.MonkeyPatch): factory = vikingdb_module.VikingDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -293,7 +293,9 @@ def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, mo ("VIKINGDB_SCHEME", "VIKINGDB_SCHEME should not be None"), ], ) -def test_vikingdb_factory_raises_when_required_config_missing(vikingdb_module, monkeypatch, field, message): +def test_vikingdb_factory_raises_when_required_config_missing( + vikingdb_module, monkeypatch: pytest.MonkeyPatch, field, message +): factory = vikingdb_module.VikingDBVectorFactory() dataset = SimpleNamespace( id="dataset-1", index_struct_dict={"vector_store": {"class_prefix": "existing"}}, index_struct=None diff --git a/api/pyproject.toml b/api/pyproject.toml index d67df06f2c..096c26259a 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -102,6 +102,7 @@ graphon = { git = "https://github.com/QuantumGhost/graphon", branch = 
"hitl-form default-groups = ["storage", "tools", "vdb-all", "trace-all"] package = false override-dependencies = [ + "litellm>=1.83.7", "pyarrow>=18.0.0", ] diff --git a/api/services/file_service.py b/api/services/file_service.py index f60afe2f19..b683a2f3d4 100644 --- a/api/services/file_service.py +++ b/api/services/file_service.py @@ -107,15 +107,14 @@ class FileService: hash=hashlib.sha3_256(content).hexdigest(), source_url=source_url, ) - # The `UploadFile` ID is generated within its constructor, so flushing to retrieve the ID is unnecessary. - # We can directly generate the `source_url` here before committing. - if not upload_file.source_url: - upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) with self._session_maker(expire_on_commit=False) as session: session.add(upload_file) session.commit() + if not upload_file.source_url: + upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) + return upload_file @staticmethod diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index 3b5e822b90..90131fe98d 100644 --- a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -13,7 +13,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole -from models.enums import ConversationFromSource +from models.enums import AppStatus, ConversationFromSource from models.model import AppMode from services.app_generate_service import AppGenerateService @@ -28,7 +28,7 @@ class TestChatMessageApiPermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + 
app.status = AppStatus.NORMAL return app @pytest.fixture @@ -78,7 +78,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -130,7 +130,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py index 309a0b015a..c4db0d5111 100644 --- a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py +++ b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py @@ -14,7 +14,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole -from models.enums import FeedbackFromSource, FeedbackRating +from models.enums import AppStatus, FeedbackFromSource, FeedbackRating from models.model import AppMode, MessageFeedback from services.feedback_service import FeedbackService @@ -29,7 +29,7 @@ class TestFeedbackExportApi: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.name = "Test App" return app @@ -135,7 +135,7 @@ class TestFeedbackExportApi: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -167,7 +167,13 @@ class TestFeedbackExportApi: mock_export_feedbacks.assert_called_once() def test_feedback_export_csv_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + 
auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + mock_account, + sample_feedback_data, ): """Test feedback export in CSV format.""" @@ -202,7 +208,13 @@ class TestFeedbackExportApi: assert "text/csv" in response.content_type def test_feedback_export_json_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + mock_account, + sample_feedback_data, ): """Test feedback export in JSON format.""" @@ -246,7 +258,7 @@ class TestFeedbackExportApi: assert "application/json" in response.content_type def test_feedback_export_with_filters( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with various filters.""" @@ -287,7 +299,7 @@ class TestFeedbackExportApi: ) def test_feedback_export_invalid_date_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with invalid date format.""" @@ -312,7 +324,7 @@ class TestFeedbackExportApi: assert "Parameter validation error" in response_json["error"] def test_feedback_export_server_error( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with server error.""" diff --git a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py index 04945e57a0..ab08c7a6d8 100644 --- 
a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py @@ -11,6 +11,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole +from models.enums import AppStatus from models.model import AppMode from services.app_model_config_service import AppModelConfigService @@ -25,7 +26,7 @@ class TestModelConfigResourcePermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.app_model_config_id = str(uuid.uuid4()) return app @@ -73,7 +74,7 @@ class TestModelConfigResourcePermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py index a876b0c4aa..7d0b575262 100644 --- a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py +++ b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py @@ -1,5 +1,7 @@ from collections.abc import Generator +from pytest_mock import MockerFixture + from core.datasource.datasource_manager import DatasourceManager from core.datasource.entities.datasource_entities import DatasourceMessage from graphon.node_events import StreamCompletedEvent @@ -19,7 +21,7 @@ def _gen_var_stream() -> Generator[DatasourceMessage, None, None]: ) -def test_stream_node_events_accumulates_variables(mocker): +def test_stream_node_events_accumulates_variables(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_var_stream()) events 
= list( DatasourceManager.stream_node_events( diff --git a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py index 2392084c36..2c1e667c58 100644 --- a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py +++ b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GP: call_depth = 0 -def test_node_integration_minimal_stream(mocker): +def test_node_integration_minimal_stream(mocker: MockerFixture): sys_d = { "sys": { "datasource_type": "online_document", diff --git a/api/tests/integration_tests/workflow/nodes/test_tool.py b/api/tests/integration_tests/workflow/nodes/test_tool.py index a8e9422c1e..493330e02b 100644 --- a/api/tests/integration_tests/workflow/nodes/test_tool.py +++ b/api/tests/integration_tests/workflow/nodes/test_tool.py @@ -2,6 +2,8 @@ import time import uuid from unittest.mock import MagicMock, patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.tools.utils.configuration import ToolParameterConfigurationManager from core.workflow.node_factory import DifyNodeFactory @@ -71,7 +73,7 @@ def init_tool_node(config: dict): return node -def test_tool_variable_invoke(monkeypatch): +def test_tool_variable_invoke(monkeypatch: pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", @@ -106,7 +108,7 @@ def test_tool_variable_invoke(monkeypatch): assert item.node_run_result.outputs.get("text") is not None -def test_tool_mixed_invoke(monkeypatch): +def test_tool_mixed_invoke(monkeypatch: 
pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", diff --git a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py index 178fc2e4fb..390795486b 100644 --- a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py +++ b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py @@ -11,7 +11,7 @@ from libs import helper as helper_module @pytest.mark.usefixtures("flask_app_with_containers") -def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch): +def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch: pytest.MonkeyPatch): prefix = f"test_rate_limit:{uuid.uuid4().hex}" limiter = helper_module.RateLimiter(prefix=prefix, max_attempts=2, time_window=60) key = limiter._get_key("203.0.113.10") diff --git a/api/tests/test_containers_integration_tests/services/test_agent_service.py b/api/tests/test_containers_integration_tests/services/test_agent_service.py index 00a2f9a59f..cbd939c7a4 100644 --- a/api/tests/test_containers_integration_tests/services/test_agent_service.py +++ b/api/tests/test_containers_integration_tests/services/test_agent_service.py @@ -6,7 +6,7 @@ from faker import Faker from sqlalchemy.orm import Session from core.plugin.impl.exc import PluginDaemonClientSideError -from models import Account +from models import Account, CreatorUserRole from models.enums import ConversationFromSource, MessageFileBelongsTo from models.model import AppModelConfig, Conversation, EndUser, Message, MessageAgentThought from services.account_service import AccountService, TenantService @@ -246,7 +246,7 @@ class TestAgentService: tool_input=json.dumps({"test_tool": {"input": "test_input"}}), observation=json.dumps({"test_tool": {"output": "test_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, 
created_by=message.from_account_id, ) db_session_with_containers.add(thought1) @@ -294,7 +294,7 @@ class TestAgentService: agent_thoughts = self._create_test_agent_thoughts(db_session_with_containers, message) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result structure assert result is not None @@ -370,7 +370,7 @@ class TestAgentService: # Execute the method under test with non-existent message with pytest.raises(ValueError, match="Message not found"): - AgentService.get_agent_logs(app, str(conversation.id), fake.uuid4()) + AgentService.get_agent_logs(app, conversation.id, fake.uuid4()) def test_get_agent_logs_with_end_user( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -451,7 +451,7 @@ class TestAgentService: db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -523,7 +523,7 @@ class TestAgentService: db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -561,14 +561,14 @@ class TestAgentService: tool_input=json.dumps({"error_tool": {"input": "test_input"}}), observation=json.dumps({"error_tool": {"output": "error_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_error) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + 
result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -592,7 +592,7 @@ class TestAgentService: conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -654,7 +654,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="App model config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def test_get_agent_logs_agent_config_not_found( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -673,7 +673,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="Agent config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def test_list_agent_providers_success( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -687,7 +687,7 @@ class TestAgentService: app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) # Execute the method under test - result = AgentService.list_agent_providers(str(account.id), str(app.tenant_id)) + result = AgentService.list_agent_providers(account.id, app.tenant_id) # Verify the result assert result is not None @@ -696,7 +696,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(str(app.tenant_id)) + 
mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(app.tenant_id) def test_get_agent_provider_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ @@ -710,7 +710,7 @@ class TestAgentService: provider_name = "test_provider" # Execute the method under test - result = AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + result = AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) # Verify the result assert result is not None @@ -718,7 +718,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(str(app.tenant_id), provider_name) + mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(app.tenant_id, provider_name) def test_get_agent_provider_plugin_error( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -740,7 +740,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match=error_message): - AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) def test_get_agent_logs_with_complex_tool_data( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -796,14 +796,14 @@ class TestAgentService: {"tool1": {"output1": "result1"}, "tool2": {"output2": "result2"}, "tool3": {"output3": "result3"}} ), tokens=100, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(complex_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, 
conversation.id, message.id) # Verify the result assert result is not None @@ -891,14 +891,14 @@ class TestAgentService: observation=json.dumps({"file_tool": {"output": "test_output"}}), message_files=json.dumps(["file1", "file2"]), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_files) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -926,7 +926,7 @@ class TestAgentService: mock_external_service_dependencies["current_user"].timezone = "Asia/Shanghai" # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -960,14 +960,14 @@ class TestAgentService: tool_input="", # Empty input observation="", # Empty observation tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(empty_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -1001,14 +1001,14 @@ class TestAgentService: tool_input="invalid json", # Malformed JSON observation="invalid json", # Malformed JSON tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(malformed_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, 
str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result - should handle malformed JSON gracefully assert result is not None diff --git a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py index 7c5d2390ba..a5ec06dc13 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py @@ -198,7 +198,7 @@ class TestAppDslService: def test_check_version_compatibility_newer_version_returns_pending(self): assert _check_version_compatibility("99.0.0") == ImportStatus.PENDING - def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch): + def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(app_dsl_service, "CURRENT_DSL_VERSION", "1.0.0") assert _check_version_compatibility("0.9.9") == ImportStatus.PENDING @@ -272,7 +272,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Missing app data" in result.error - def test_import_app_yaml_error_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): def bad_safe_load(_content: str): raise yaml.YAMLError("bad") @@ -287,7 +289,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert result.error.startswith("Invalid YAML format:") - def test_import_app_unexpected_error_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_unexpected_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( AppDslService, "_create_or_update_app", @@ -305,7 +309,9 @@ class 
TestAppDslService: # ── Import: YAML URL ────────────────────────────────────────────── - def test_import_app_yaml_url_fetch_error_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_fetch_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( app_dsl_service.ssrf_proxy, "get", @@ -321,7 +327,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Error fetching YAML from URL: boom" in result.error - def test_import_app_yaml_url_empty_content_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_empty_content_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() response.content = b"" response.raise_for_status.return_value = None @@ -336,7 +344,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Empty content" in result.error - def test_import_app_yaml_url_file_too_large_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_file_too_large_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() response.content = b"x" * (DSL_MAX_SIZE + 1) response.raise_for_status.return_value = None @@ -379,7 +389,9 @@ class TestAppDslService: assert result.imported_dsl_version == "99.0.0" assert requested_urls == [yaml_url] - def test_import_app_yaml_url_github_blob_rewrites_to_raw(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_github_blob_rewrites_to_raw( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): yaml_url = "https://github.com/acme/repo/blob/main/app.yml" raw_url = "https://raw.githubusercontent.com/acme/repo/main/app.yml" yaml_bytes = _pending_yaml_content() @@ -491,7 +503,7 @@ class 
TestAppDslService: @pytest.mark.parametrize("has_workflow", [True, False]) def test_import_app_legacy_versions_extract_dependencies( - self, db_session_with_containers: Session, monkeypatch, has_workflow: bool + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch, has_workflow: bool ): monkeypatch.setattr( AppDslService, @@ -554,7 +566,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "expired" in result.error - def test_confirm_import_success_deletes_redis_key(self, db_session_with_containers: Session, monkeypatch): + def test_confirm_import_success_deletes_redis_key( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): import_id = str(uuid4()) redis_key = f"{IMPORT_INFO_REDIS_KEY_PREFIX}{import_id}" @@ -614,7 +628,9 @@ class TestAppDslService: result = service.check_dependencies(app_model=app_model) assert result.leaked_dependencies == [] - def test_check_dependencies_calls_analysis_service(self, db_session_with_containers: Session, monkeypatch): + def test_check_dependencies_calls_analysis_service( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): app_id = str(uuid4()) pending = CheckDependenciesPendingData(dependencies=[], app_id=app_id) redis_client.setex( @@ -665,7 +681,9 @@ class TestAppDslService: with pytest.raises(ValueError, match="loss app mode"): service._create_or_update_app(app=None, data={"app": {}}, account=_account_mock()) - def test_create_or_update_app_existing_app_updates_fields(self, db_session_with_containers: Session, monkeypatch): + def test_create_or_update_app_existing_app_updates_fields( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): fixed_now = object() monkeypatch.setattr(app_dsl_service, "naive_utc_now", lambda: fixed_now) @@ -778,8 +796,8 @@ class TestAppDslService: service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Missing model_config"): 
service._create_or_update_app( - app=_app_stub(mode=AppMode.CHAT.value), - data={"app": {"mode": AppMode.CHAT.value}}, + app=_app_stub(mode=AppMode.CHAT), + data={"app": {"mode": AppMode.CHAT}}, account=_account_mock(), ) @@ -794,7 +812,7 @@ class TestAppDslService: service._create_or_update_app( app=app, data={ - "app": {"mode": AppMode.CHAT.value}, + "app": {"mode": AppMode.CHAT}, "model_config": {"model": {"provider": "openai"}}, }, account=account, @@ -807,14 +825,14 @@ class TestAppDslService: service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Invalid app mode"): service._create_or_update_app( - app=_app_stub(mode=AppMode.RAG_PIPELINE.value), - data={"app": {"mode": AppMode.RAG_PIPELINE.value}}, + app=_app_stub(mode=AppMode.RAG_PIPELINE), + data={"app": {"mode": AppMode.RAG_PIPELINE}}, account=_account_mock(), ) # ── Export ───────────────────────────────────────────────────────── - def test_export_dsl_delegates_by_mode(self, monkeypatch): + def test_export_dsl_delegates_by_mode(self, monkeypatch: pytest.MonkeyPatch): workflow_calls: list[bool] = [] model_calls: list[bool] = [] monkeypatch.setattr( @@ -836,14 +854,14 @@ class TestAppDslService: assert workflow_calls == [True] chat_app = _app_stub( - mode=AppMode.CHAT.value, + mode=AppMode.CHAT, icon_type="emoji", app_model_config=SimpleNamespace(to_dict=lambda: {"agent_mode": {"tools": []}}), ) AppDslService.export_dsl(chat_app) assert model_calls == [True] - def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch): + def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, "_append_workflow_export_data", @@ -1011,7 +1029,7 @@ class TestAppDslService: # ── Workflow Export Data ─────────────────────────────────────────── - def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch): + def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch: 
pytest.MonkeyPatch): workflow_dict = { "graph": { "nodes": [ @@ -1111,7 +1129,7 @@ class TestAppDslService: assert nodes[5]["data"]["subscription_id"] == "" assert export_data["dependencies"] == [{"tenant": _DEFAULT_TENANT_ID, "dep": "dep-1"}] - def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch): + def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch: pytest.MonkeyPatch): workflow_service = MagicMock() workflow_service.get_draft_workflow.return_value = None monkeypatch.setattr(app_dsl_service, "WorkflowService", lambda: workflow_service) @@ -1126,7 +1144,7 @@ class TestAppDslService: # ── Model Config Export Data ────────────────────────────────────── - def test_append_model_config_export_data_filters_credential_id(self, monkeypatch): + def test_append_model_config_export_data_filters_credential_id(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, "_extract_dependencies_from_model_config", @@ -1160,7 +1178,7 @@ class TestAppDslService: # ── Dependency Extraction ───────────────────────────────────────── - def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, monkeypatch): + def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_tool_dependency", @@ -1230,7 +1248,7 @@ class TestAppDslService: "model:m4", ] - def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.ToolNodeData, "model_validate", @@ -1241,7 +1259,7 @@ class TestAppDslService: ) assert deps == [] - def test_extract_dependencies_from_model_config_parses_providers(self, monkeypatch): + def test_extract_dependencies_from_model_config_parses_providers(self, monkeypatch: pytest.MonkeyPatch): 
monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1264,7 +1282,7 @@ class TestAppDslService: ) assert deps == ["model:p1", "model:p2", "tool:t1"] - def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1278,7 +1296,7 @@ class TestAppDslService: def test_get_leaked_dependencies_empty_returns_empty(self): assert AppDslService.get_leaked_dependencies(_DEFAULT_TENANT_ID, []) == [] - def test_get_leaked_dependencies_delegates(self, monkeypatch): + def test_get_leaked_dependencies_delegates(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "get_leaked_dependencies", @@ -1289,7 +1307,7 @@ class TestAppDslService: # ── Encryption/Decryption ───────────────────────────────────────── - def test_encrypt_decrypt_dataset_id_respects_config(self, monkeypatch): + def test_encrypt_decrypt_dataset_id_respects_config(self, monkeypatch: pytest.MonkeyPatch): tenant_id = _DEFAULT_TENANT_ID dataset_uuid = "00000000-0000-0000-0000-000000000000" @@ -1314,7 +1332,7 @@ class TestAppDslService: value = "00000000-0000-0000-0000-000000000000" assert AppDslService.decrypt_dataset_id(encrypted_data=value, tenant_id=_DEFAULT_TENANT_ID) == value - def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch): + def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", @@ -1322,7 +1340,7 @@ class TestAppDslService: ) assert AppDslService.decrypt_dataset_id(encrypted_data="not-base64", tenant_id=_DEFAULT_TENANT_ID) is None - def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch): 
+ def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", diff --git a/api/tests/test_containers_integration_tests/services/test_app_service.py b/api/tests/test_containers_integration_tests/services/test_app_service.py index b695ae9fd9..837b63d1ea 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_service.py @@ -6,6 +6,7 @@ from sqlalchemy.orm import Session from constants.model_template import default_app_templates from models import Account +from models.enums import AppStatus, CustomizeTokenStrategy from models.model import App, IconType, Site from services.account_service import AccountService, TenantService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -1079,9 +1080,9 @@ class TestAppService: site.app_id = app.id site.code = fake.postalcode() site.title = fake.company() - site.status = "normal" + site.status = AppStatus.NORMAL site.default_language = "en-US" - site.customize_token_strategy = "uuid" + site.customize_token_strategy = CustomizeTokenStrategy.UUID db_session_with_containers.add(site) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service.py b/api/tests/test_containers_integration_tests/services/test_conversation_service.py index 8aa10129c1..5f3914eb19 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service.py @@ -10,6 +10,7 @@ from sqlalchemy import select from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.enums import 
ConversationFromSource from models.model import App, Conversation, EndUser, Message, MessageAnnotation @@ -22,7 +23,7 @@ from services.message_service import MessageService class ConversationServiceIntegrationTestDataFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -41,7 +42,7 @@ class ConversationServiceIntegrationTestDataFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) @@ -155,7 +156,7 @@ class ConversationServiceIntegrationTestDataFactory: total_price=Decimal(0), currency="USD", status="normal", - invoke_from=InvokeFrom.WEB_APP.value, + invoke_from=InvokeFrom.WEB_APP, from_source=ConversationFromSource.API if isinstance(user, EndUser) else ConversationFromSource.CONSOLE, from_end_user_id=user.id if isinstance(user, EndUser) else None, from_account_id=user.id if isinstance(user, Account) else None, diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py index 6c292dbc4b..853630ad65 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py @@ -25,7 +25,7 @@ from services.errors.conversation import ( class ConversationServiceVariableIntegrationFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() diff --git 
a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py index 2bec703f0c..0c089e506b 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py @@ -6,6 +6,7 @@ from unittest.mock import create_autospec, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from werkzeug.exceptions import Forbidden, NotFound from core.rag.index_processor.constant.index_type import IndexStructureType @@ -119,13 +120,13 @@ def current_user_mock(): yield current_user -def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers): +def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_document(dataset.id, None) is None -def test_get_document_queries_by_dataset_and_document_id(db_session_with_containers): +def test_get_document_queries_by_dataset_and_document_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset) @@ -135,7 +136,7 @@ def test_get_document_queries_by_dataset_and_document_id(db_session_with_contain assert result.id == document.id -def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers): +def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) result = DocumentService.get_documents_by_ids(dataset.id, []) @@ -143,7 +144,7 @@ def 
test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_cont assert result == [] -def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers): +def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) doc_a = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, name="a.txt") doc_b = DocumentServiceIntegrationFactory.create_document( @@ -158,13 +159,13 @@ def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers assert {document.id for document in result} == {doc_a.id, doc_b.id} -def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers): +def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.update_documents_need_summary(dataset.id, []) == 0 -def test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers): +def test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) paragraph_doc = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -195,7 +196,7 @@ def test_update_documents_need_summary_updates_matching_non_qa_documents(db_sess assert refreshed_qa.need_summary is True -def test_get_document_download_url_uses_signed_url_helper(db_session_with_containers): +def test_get_document_download_url_uses_signed_url_helper(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -215,7 +216,7 
@@ def test_get_document_download_url_uses_signed_url_helper(db_session_with_contai get_url.assert_called_once_with(upload_file_id=upload_file.id, as_attachment=True) -def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -232,7 +233,9 @@ def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type ) -def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -248,7 +251,7 @@ def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file ) -def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -265,7 +268,9 @@ def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_sessio assert result == "99" -def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing( + db_session_with_containers: Session, +): dataset = 
DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -278,7 +283,7 @@ def test_get_upload_file_for_upload_file_document_raises_when_file_service_retur DocumentService._get_upload_file_for_upload_file_document(document) -def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -296,7 +301,9 @@ def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session assert result.id == upload_file.id -def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with pytest.raises(NotFound, match="Document not found"): @@ -307,7 +314,9 @@ def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_doc ) -def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -329,7 +338,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_a ) -def 
test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -345,7 +356,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload ) -def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file_a = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -395,7 +408,7 @@ def test_prepare_document_batch_download_zip_raises_not_found_for_missing_datase def test_prepare_document_batch_download_zip_translates_permission_error_to_forbidden( - db_session_with_containers, + db_session_with_containers: Session, current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -418,7 +431,7 @@ def test_prepare_document_batch_download_zip_translates_permission_error_to_forb def test_prepare_document_batch_download_zip_returns_upload_files_in_requested_order( - db_session_with_containers, + db_session_with_containers: Session, current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -461,7 +474,7 @@ def test_prepare_document_batch_download_zip_returns_upload_files_in_requested_o assert download_name.endswith(".zip") -def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers): +def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers: Session): dataset = 
DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) enabled_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -480,7 +493,9 @@ def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_co assert [document.id for document in result] == [enabled_document.id] -def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents(db_session_with_containers): +def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) available_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -501,7 +516,7 @@ def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchive assert [document.id for document in result] == [available_document.id] -def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers): +def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) error_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -526,7 +541,7 @@ def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db assert {document.id for document in result} == {error_document.id, paused_document.id} -def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers): +def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) batch = f"batch-{uuid4()}" matching_document = DocumentServiceIntegrationFactory.create_document( @@ -549,7 +564,7 @@ def 
test_get_batch_documents_filters_by_current_user_tenant(db_session_with_cont assert [document.id for document in result] == [matching_document.id] -def test_get_document_file_detail_returns_upload_file(db_session_with_containers): +def test_get_document_file_detail_returns_upload_file(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -563,7 +578,7 @@ def test_get_document_file_detail_returns_upload_file(db_session_with_containers assert result.id == upload_file.id -def test_delete_document_emits_signal_and_commits(db_session_with_containers): +def test_delete_document_emits_signal_and_commits(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -588,7 +603,7 @@ def test_delete_document_emits_signal_and_commits(db_session_with_containers): ) -def test_delete_documents_ignores_empty_input(db_session_with_containers): +def test_delete_documents_ignores_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with patch("services.dataset_service.batch_clean_document_task.delay") as delay: @@ -597,7 +612,7 @@ def test_delete_documents_ignores_empty_input(db_session_with_containers): delay.assert_not_called() -def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers): +def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) dataset.chunk_structure = IndexStructureType.PARAGRAPH_INDEX db_session_with_containers.commit() @@ -637,14 +652,14 @@ def 
test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_wi assert set(args[3]) == {upload_file_a.id, upload_file_b.id} -def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers): +def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, position=3) assert DocumentService.get_documents_position(dataset.id) == 4 -def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers): +def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_documents_position(dataset.id) == 1 diff --git a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py index c0047df810..383a5f6374 100644 --- a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py +++ b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py @@ -2,6 +2,7 @@ import datetime from uuid import uuid4 from sqlalchemy import select +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType from models.dataset import Dataset, Document @@ -58,7 +59,7 @@ def _create_document( return document -def test_build_display_status_filters_available(db_session_with_containers): +def test_build_display_status_filters_available(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) available_doc = _create_document( db_session_with_containers, @@ -97,7 
+98,7 @@ def test_build_display_status_filters_available(db_session_with_containers): assert [row.id for row in rows] == [available_doc.id] -def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers): +def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) waiting_doc = _create_document( db_session_with_containers, @@ -121,7 +122,7 @@ def test_apply_display_status_filter_applies_when_status_present(db_session_with assert [row.id for row in rows] == [waiting_doc.id] -def test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers): +def test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) doc1 = _create_document( db_session_with_containers, diff --git a/api/tests/test_containers_integration_tests/services/test_end_user_service.py b/api/tests/test_containers_integration_tests/services/test_end_user_service.py index 074d448aab..3f611d92f7 100644 --- a/api/tests/test_containers_integration_tests/services/test_end_user_service.py +++ b/api/tests/test_containers_integration_tests/services/test_end_user_service.py @@ -7,6 +7,7 @@ import pytest from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.model import App, DefaultEndUserSessionID, EndUser from services.end_user_service import EndUserService @@ -16,7 +17,7 @@ class TestEndUserServiceFactory: """Factory class for creating test data and mock objects for end user service tests.""" @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) 
db_session_with_containers.flush() @@ -35,7 +36,7 @@ class TestEndUserServiceFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) diff --git a/api/tests/test_containers_integration_tests/services/test_feature_service.py b/api/tests/test_containers_integration_tests/services/test_feature_service.py index f78aeaf984..a678e37b41 100644 --- a/api/tests/test_containers_integration_tests/services/test_feature_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feature_service.py @@ -644,7 +644,7 @@ class TestFeatureService: assert result.max_plugin_package_size == 15728640 # Verify default license status - assert result.license.status.value == "none" + assert result.license.status == "none" assert result.license.expired_at == "" assert result.license.workspaces.enabled is False diff --git a/api/tests/test_containers_integration_tests/services/test_feedback_service.py b/api/tests/test_containers_integration_tests/services/test_feedback_service.py index 3dcd6586e2..a4663450d4 100644 --- a/api/tests/test_containers_integration_tests/services/test_feedback_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feedback_service.py @@ -23,7 +23,7 @@ class TestFeedbackService: """Test FeedbackService methods.""" @pytest.fixture - def mock_db_session(self, monkeypatch): + def mock_db_session(self, monkeypatch: pytest.MonkeyPatch): """Mock database session.""" mock_session = mock.Mock() monkeypatch.setattr(db, "session", mock_session) diff --git a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py index ce63e7a71a..bfc2af6509 100644 --- a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py +++ 
b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py @@ -122,7 +122,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestUnsupportedError): handler.send_test(context=MagicMock(), method=MagicMock()) - def test_send_test_feature_disabled(self, monkeypatch): + def test_send_test_feature_disabled(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -137,7 +137,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Email delivery is not available"): handler.send_test(context=context, method=method) - def test_send_test_mail_not_inited(self, monkeypatch): + def test_send_test_mail_not_inited(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -154,7 +154,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Mail client is not initialized."): handler.send_test(context=context, method=method) - def test_send_test_no_recipients(self, monkeypatch): + def test_send_test_no_recipients(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -173,7 +173,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="No recipients configured"): handler.send_test(context=context, method=method) - def test_send_test_success(self, monkeypatch): + def test_send_test_success(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -209,7 +209,7 @@ class TestEmailDeliveryTestHandler: assert kwargs["to"] == "test@example.com" assert "RENDERED_Subj" in kwargs["subject"] - def test_send_test_sanitizes_subject(self, monkeypatch): + def test_send_test_sanitizes_subject(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", diff --git 
a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py index 44e5a82868..52ebc0131f 100644 --- a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py +++ b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py @@ -1,6 +1,7 @@ from __future__ import annotations import pytest +from sqlalchemy.orm import Session from services.message_service import MessageService from tests.test_containers_integration_tests.helpers.execution_extra_content import ( @@ -9,7 +10,7 @@ from tests.test_containers_integration_tests.helpers.execution_extra_content imp @pytest.mark.usefixtures("flask_req_ctx_with_containers") -def test_pagination_returns_extra_contents(db_session_with_containers): +def test_pagination_returns_extra_contents(db_session_with_containers: Session): fixture = create_human_input_message_fixture(db_session_with_containers) pagination = MessageService.pagination_by_first_id( diff --git a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py index 80289c448a..a8d295e6a9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py @@ -16,7 +16,7 @@ from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from extensions.ext_redis import redis_client -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document, DocumentSegment from models.enums 
import DataSourceType, DocumentCreatedFrom, IndexingStatus, SegmentStatus from tasks.create_segment_to_index_task import create_segment_to_index_task @@ -73,7 +73,7 @@ class TestCreateSegmentToIndexTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) @@ -82,7 +82,7 @@ class TestCreateSegmentToIndexTask: # Create tenant tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, plan="basic", ) db_session_with_containers.add(tenant) diff --git a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py index a5a3cd10b5..5287cd06db 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py @@ -12,7 +12,7 @@ from sqlalchemy.orm import Session from core.indexing_runner import DocumentIsPausedError from core.rag.index_processor.constant.index_type import IndexTechniqueType from enums.cloud_plan import CloudPlan -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus from tasks.document_indexing_task import ( @@ -54,7 +54,7 @@ class _TrackedSessionContext: @pytest.fixture(autouse=True) -def _ensure_testcontainers_db(db_session_with_containers): +def _ensure_testcontainers_db(db_session_with_containers: Session): """Ensure this suite always runs on testcontainers infrastructure.""" return db_session_with_containers @@ -121,12 +121,12 @@ class TestDatasetIndexingTaskIntegration: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + 
status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.flush() - tenant = Tenant(name=fake.company(), status="normal") + tenant = Tenant(name=fake.company(), status=TenantStatus.NORMAL) db_session_with_containers.add(tenant) db_session_with_containers.flush() diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py index ff72232d12..c4895839c9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py @@ -5,6 +5,7 @@ from faker import Faker from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import TenantStatus from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from tasks.mail_account_deletion_task import send_account_deletion_verification_code, send_deletion_success_task @@ -55,7 +56,7 @@ class TestMailAccountDeletionTask: # Create tenant tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py index 8e9da6aaaa..0eec166fe2 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py @@ -18,6 +18,7 @@ from sqlalchemy import delete from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import AccountStatus, TenantStatus from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from tasks.mail_email_code_login import 
send_email_code_login_mail_task @@ -91,7 +92,7 @@ class TestSendEmailCodeLoginMailTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) @@ -120,7 +121,7 @@ class TestSendEmailCodeLoginMailTask: tenant = Tenant( name=fake.company(), plan="basic", - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py index f505361727..a452bee9f8 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py @@ -31,7 +31,7 @@ from tasks.mail_human_input_delivery_task import dispatch_human_input_email_task @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(HumanInputFormRecipient)) db_session_with_containers.execute(delete(HumanInputDelivery)) db_session_with_containers.execute(delete(HumanInputForm)) @@ -43,7 +43,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_workspace_member(db_session_with_containers): +def _create_workspace_member(db_session_with_containers: Session): account = Account( email="owner@example.com", name="Owner", diff --git a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py index 03c02ea341..204f533978 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py +++ 
b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py @@ -21,7 +21,7 @@ from tasks.remove_app_and_related_data_task import ( @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(WorkflowDraftVariable)) db_session_with_containers.execute(delete(WorkflowDraftVariableFile)) db_session_with_containers.execute(delete(UploadFile)) @@ -30,7 +30,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_tenant_and_app(db_session_with_containers): +def _create_tenant_and_app(db_session_with_containers: Session): tenant = Tenant(name=f"test_tenant_{uuid.uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() diff --git a/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py new file mode 100644 index 0000000000..62d3d79cf1 --- /dev/null +++ b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py @@ -0,0 +1,103 @@ +"""Unit tests for the Markdown API docs generator.""" + +import importlib.util +import sys +from pathlib import Path + + +def _load_generate_swagger_markdown_docs_module(): + api_dir = Path(__file__).resolve().parents[3] + script_path = api_dir / "dev" / "generate_swagger_markdown_docs.py" + + spec = importlib.util.spec_from_file_location("generate_swagger_markdown_docs", script_path) + assert spec + assert spec.loader + + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) # type: ignore[attr-defined] + return module + + +def test_generate_markdown_docs_keeps_split_docs_and_merges_fastopenapi_into_console(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / "openapi" + markdown_dir = tmp_path / "markdown" + 
stale_combined_doc = markdown_dir / "api-reference.md" + markdown_dir.mkdir() + stale_combined_doc.write_text("stale", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + path.write_text("{}", encoding="utf-8") + paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + path = output_dir / module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n\n## Routes\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + written_paths = module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert [path.name for path in written_paths] == [ + "console-swagger.md", + "web-swagger.md", + "service-swagger.md", + ] + assert not stale_combined_doc.exists() + assert not list(swagger_dir.glob("*.json")) + + console_markdown = (markdown_dir / "console-swagger.md").read_text(encoding="utf-8") + assert "## FastOpenAPI Preview (OpenAPI 3.0)" in console_markdown + assert "### fastopenapi-console-openapi" in console_markdown + assert "#### Routes" in console_markdown + assert "FastOpenAPI Preview" not in (markdown_dir / "web-swagger.md").read_text(encoding="utf-8") + assert "FastOpenAPI Preview" not in (markdown_dir / "service-swagger.md").read_text(encoding="utf-8") + + +def test_generate_markdown_docs_only_removes_generated_specs_from_separate_swagger_dir(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / 
"swagger" + markdown_dir = tmp_path / "markdown" + swagger_dir.mkdir() + existing_file = swagger_dir / "existing.txt" + existing_file.write_text("keep me", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + path.write_text("{}", encoding="utf-8") + paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + path = output_dir / module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert existing_file.read_text(encoding="utf-8") == "keep me" + assert not list(swagger_dir.glob("*.json")) diff --git a/api/tests/unit_tests/commands/test_generate_swagger_specs.py b/api/tests/unit_tests/commands/test_generate_swagger_specs.py index e77e875081..79a577087d 100644 --- a/api/tests/unit_tests/commands/test_generate_swagger_specs.py +++ b/api/tests/unit_tests/commands/test_generate_swagger_specs.py @@ -6,6 +6,16 @@ import sys from pathlib import Path +def _walk_values(value): + yield value + if isinstance(value, dict): + for child in value.values(): + yield from _walk_values(child) + elif isinstance(value, list): + for child in value: + yield from _walk_values(child) + + def _load_generate_swagger_specs_module(): api_dir = Path(__file__).resolve().parents[3] script_path = api_dir / "dev" / "generate_swagger_specs.py" @@ -35,3 +45,32 @@ def 
test_generate_specs_writes_console_web_and_service_swagger_files(tmp_path): payload = json.loads(path.read_text(encoding="utf-8")) assert payload["swagger"] == "2.0" assert "paths" in payload + + +def test_generate_specs_writes_swagger_with_resolvable_references_and_no_nulls(tmp_path): + module = _load_generate_swagger_specs_module() + + written_paths = module.generate_specs(tmp_path) + + for path in written_paths: + payload = json.loads(path.read_text(encoding="utf-8")) + definitions = payload["definitions"] + refs = { + item["$ref"].removeprefix("#/definitions/") + for item in _walk_values(payload) + if isinstance(item, dict) and isinstance(item.get("$ref"), str) + } + + assert refs <= set(definitions) + assert all(value is not None for value in _walk_values(payload)) + + +def test_generate_specs_is_idempotent(tmp_path): + module = _load_generate_swagger_specs_module() + + first_paths = module.generate_specs(tmp_path / "first") + second_paths = module.generate_specs(tmp_path / "second") + + assert [path.name for path in first_paths] == [path.name for path in second_paths] + for first_path, second_path in zip(first_paths, second_paths): + assert first_path.read_text(encoding="utf-8") == second_path.read_text(encoding="utf-8") diff --git a/api/tests/unit_tests/controllers/common/test_helpers.py b/api/tests/unit_tests/controllers/common/test_helpers.py index 59c463177c..376a7a90c5 100644 --- a/api/tests/unit_tests/controllers/common/test_helpers.py +++ b/api/tests/unit_tests/controllers/common/test_helpers.py @@ -57,7 +57,7 @@ class TestGuessFileInfoFromResponse: (False, "bin"), ], ) - def test_generated_filename_when_missing(self, monkeypatch, magic_available, expected_ext): + def test_generated_filename_when_missing(self, monkeypatch: pytest.MonkeyPatch, magic_available, expected_ext): if magic_available: if helpers.magic is None: pytest.skip("python-magic is not installed, cannot run 'magic_available=True' test variant") @@ -155,7 +155,7 @@ class 
TestMagicImportWarnings: ) def test_magic_import_warning_per_platform( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, platform_name, expected_message, ): diff --git a/api/tests/unit_tests/controllers/common/test_schema.py b/api/tests/unit_tests/controllers/common/test_schema.py index 56c8160f02..6cf36e3bce 100644 --- a/api/tests/unit_tests/controllers/common/test_schema.py +++ b/api/tests/unit_tests/controllers/common/test_schema.py @@ -17,6 +17,14 @@ class ProductModel(BaseModel): price: float +class ChildModel(BaseModel): + value: str + + +class ParentModel(BaseModel): + child: ChildModel + + @pytest.fixture(autouse=True) def mock_console_ns(): """Mock the console_ns to avoid circular imports during test collection.""" @@ -64,6 +72,22 @@ def test_register_schema_model_passes_schema_from_pydantic(): assert schema == expected_schema +def test_register_schema_model_promotes_nested_pydantic_definitions(): + from controllers.common.schema import DEFAULT_REF_TEMPLATE_SWAGGER_2_0, register_schema_model + + namespace = MagicMock(spec=Namespace) + + register_schema_model(namespace, ParentModel) + + called_schemas = {call.args[0]: call.args[1] for call in namespace.schema_model.call_args_list} + parent_schema = ParentModel.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + + assert set(called_schemas) == {"ParentModel", "ChildModel"} + assert "$defs" not in called_schemas["ParentModel"] + assert called_schemas["ParentModel"]["properties"]["child"]["$ref"] == "#/definitions/ChildModel" + assert called_schemas["ChildModel"] == parent_schema["$defs"]["ChildModel"] + + def test_register_schema_models_registers_multiple_models(): from controllers.common.schema import register_schema_models @@ -77,7 +101,7 @@ def test_register_schema_models_registers_multiple_models(): assert called_names == ["UserModel", "ProductModel"] -def test_register_schema_models_calls_register_schema_model(monkeypatch): +def 
test_register_schema_models_calls_register_schema_model(monkeypatch: pytest.MonkeyPatch): from controllers.common.schema import register_schema_models namespace = MagicMock(spec=Namespace) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py index 412edb9dfe..66d257ee66 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py @@ -68,7 +68,7 @@ def _segment(): ) -def test_get_segment_with_summary(monkeypatch): +def test_get_segment_with_summary(monkeypatch: pytest.MonkeyPatch): segment = _segment() summary = SimpleNamespace(summary_content="summary") diff --git a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py index 09ed2aaf69..4fa5d21493 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -35,7 +36,7 @@ def dataset(): @pytest.fixture(autouse=True) -def bypass_decorators(mocker): +def bypass_decorators(mocker: MockerFixture): """Bypass all decorators on the API method.""" mocker.patch( "controllers.console.datasets.hit_testing.setup_required", @@ -56,7 +57,7 @@ def bypass_decorators(mocker): class TestHitTestingApi: - def test_hit_testing_success(self, app, dataset, dataset_id): + def test_hit_testing_success(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -99,7 +100,7 @@ class TestHitTestingApi: assert "records" in result assert result["records"] == [] - def 
test_hit_testing_success_with_optional_record_fields(self, app, dataset, dataset_id): + def test_hit_testing_success_with_optional_record_fields(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestHitTestingApi: assert result["query"] == payload["query"] assert result["records"] == records - def test_hit_testing_dataset_not_found(self, app, dataset_id): + def test_hit_testing_dataset_not_found(self, app: Flask, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -175,7 +176,7 @@ class TestHitTestingApi: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_hit_testing_invalid_args(self, app, dataset, dataset_id): + def test_hit_testing_invalid_args(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py index 0105aacd65..4042190ff6 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -60,7 +61,7 @@ def metadata_id(): @pytest.fixture(autouse=True) -def bypass_decorators(mocker): +def bypass_decorators(mocker: MockerFixture): """Bypass setup/login/license decorators.""" mocker.patch( "controllers.console.datasets.metadata.setup_required", diff --git a/api/tests/unit_tests/controllers/console/datasets/test_website.py b/api/tests/unit_tests/controllers/console/datasets/test_website.py index 9f0da6e76f..9991a0d345 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_website.py +++ 
b/api/tests/unit_tests/controllers/console/datasets/test_website.py @@ -2,6 +2,7 @@ from unittest.mock import Mock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from controllers.console import console_ns from controllers.console.datasets.error import WebsiteCrawlError @@ -31,7 +32,7 @@ def app(): @pytest.fixture(autouse=True) -def bypass_auth_and_setup(mocker): +def bypass_auth_and_setup(mocker: MockerFixture): """Bypass setup/login/account decorators.""" mocker.patch( "controllers.console.datasets.website.login_required", @@ -48,7 +49,7 @@ def bypass_auth_and_setup(mocker): class TestWebsiteCrawlApi: - def test_crawl_success(self, app, mocker): + def test_crawl_success(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -85,7 +86,7 @@ class TestWebsiteCrawlApi: assert status == 200 assert result["job_id"] == "job-1" - def test_crawl_invalid_payload(self, app, mocker): + def test_crawl_invalid_payload(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -113,7 +114,7 @@ class TestWebsiteCrawlApi: with pytest.raises(WebsiteCrawlError, match="invalid payload"): method(api) - def test_crawl_service_error(self, app, mocker): + def test_crawl_service_error(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestWebsiteCrawlApi: class TestWebsiteCrawlStatusApi: - def test_get_status_success(self, app, mocker): + def test_get_status_success(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -181,7 +182,7 @@ class TestWebsiteCrawlStatusApi: assert status == 200 assert result["status"] == "completed" - def test_get_status_invalid_provider(self, app, mocker): + def test_get_status_invalid_provider(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -203,7 +204,7 @@ class TestWebsiteCrawlStatusApi: with 
pytest.raises(WebsiteCrawlError, match="invalid provider"): method(api, job_id) - def test_get_status_service_error(self, app, mocker): + def test_get_status_service_error(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py index e358435de4..2cfa938af8 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py @@ -1,6 +1,7 @@ from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from controllers.console.datasets.error import PipelineNotFoundError from controllers.console.datasets.wraps import get_rag_pipeline @@ -16,7 +17,7 @@ class TestGetRagPipeline: with pytest.raises(ValueError, match="missing pipeline_id"): dummy_view() - def test_pipeline_not_found(self, mocker): + def test_pipeline_not_found(self, mocker: MockerFixture): @get_rag_pipeline def dummy_view(**kwargs): return "ok" @@ -34,7 +35,7 @@ class TestGetRagPipeline: with pytest.raises(PipelineNotFoundError): dummy_view(pipeline_id="pipeline-1") - def test_pipeline_found_and_injected(self, mocker): + def test_pipeline_found_and_injected(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) pipeline.id = "pipeline-1" pipeline.tenant_id = "tenant-1" @@ -57,7 +58,7 @@ class TestGetRagPipeline: assert result is pipeline - def test_pipeline_id_removed_from_kwargs(self, mocker): + def test_pipeline_id_removed_from_kwargs(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline @@ -79,7 +80,7 @@ class TestGetRagPipeline: assert result == "ok" - def test_pipeline_id_cast_to_string(self, mocker): + def test_pipeline_id_cast_to_string(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline diff --git a/api/tests/unit_tests/controllers/console/test_admin.py 
b/api/tests/unit_tests/controllers/console/test_admin.py index 16197fcd0c..27f332ac51 100644 --- a/api/tests/unit_tests/controllers/console/test_admin.py +++ b/api/tests/unit_tests/controllers/console/test_admin.py @@ -4,6 +4,7 @@ import uuid from unittest.mock import Mock, PropertyMock, patch import pytest +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound, Unauthorized from controllers.console.admin import ( @@ -18,7 +19,7 @@ from models.model import App, InstalledApp, RecommendedApp @pytest.fixture(autouse=True) -def bypass_only_edition_cloud(mocker): +def bypass_only_edition_cloud(mocker: MockerFixture): """ Bypass only_edition_cloud decorator by setting EDITION to "CLOUD". """ @@ -29,7 +30,7 @@ def bypass_only_edition_cloud(mocker): @pytest.fixture -def mock_admin_auth(mocker): +def mock_admin_auth(mocker: MockerFixture): """ Provide valid admin authentication for controller tests. """ @@ -44,7 +45,7 @@ def mock_admin_auth(mocker): @pytest.fixture -def mock_console_payload(mocker): +def mock_console_payload(mocker: MockerFixture): payload = { "app_id": str(uuid.uuid4()), "language": "en-US", @@ -62,7 +63,7 @@ def mock_console_payload(mocker): @pytest.fixture -def mock_banner_payload(mocker): +def mock_banner_payload(mocker: MockerFixture): mocker.patch( "flask_restx.namespace.Namespace.payload", new_callable=PropertyMock, @@ -78,7 +79,7 @@ def mock_banner_payload(mocker): @pytest.fixture -def mock_session_factory(mocker): +def mock_session_factory(mocker: MockerFixture): mock_session = Mock() mock_session.execute = Mock() mock_session.add = Mock() @@ -97,7 +98,7 @@ class TestDeleteExploreBannerApi: def setup_method(self): self.api = DeleteExploreBannerApi() - def test_delete_banner_not_found(self, mocker, mock_admin_auth): + def test_delete_banner_not_found(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -106,7 +107,7 
@@ class TestDeleteExploreBannerApi: with pytest.raises(NotFound, match="is not found"): self.api.delete(uuid.uuid4()) - def test_delete_banner_success(self, mocker, mock_admin_auth): + def test_delete_banner_success(self, mocker: MockerFixture, mock_admin_auth): mock_banner = Mock() mocker.patch( @@ -126,7 +127,7 @@ class TestInsertExploreBannerApi: def setup_method(self): self.api = InsertExploreBannerApi() - def test_insert_banner_success(self, mocker, mock_admin_auth, mock_banner_payload): + def test_insert_banner_success(self, mocker: MockerFixture, mock_admin_auth, mock_banner_payload): mocker.patch("controllers.console.admin.db.session.add") mocker.patch("controllers.console.admin.db.session.commit") @@ -168,7 +169,7 @@ class TestInsertExploreAppApiDelete: def setup_method(self): self.api = InsertExploreAppApi() - def test_delete_when_not_in_explore(self, mocker, mock_admin_auth): + def test_delete_when_not_in_explore(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.session_factory.create_session", return_value=Mock( @@ -183,7 +184,7 @@ class TestInsertExploreAppApiDelete: assert status == 204 assert response["result"] == "success" - def test_delete_when_in_explore_with_trial_app(self, mocker, mock_admin_auth): + def test_delete_when_in_explore_with_trial_app(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app from explore that has a trial app.""" app_id = uuid.uuid4() @@ -225,7 +226,7 @@ class TestInsertExploreAppApiDelete: assert response["result"] == "success" assert mock_app.is_public is False - def test_delete_with_installed_apps(self, mocker, mock_admin_auth): + def test_delete_with_installed_apps(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app that has installed apps in other tenants.""" app_id = uuid.uuid4() @@ -270,7 +271,7 @@ class TestInsertExploreAppListApi: def setup_method(self): self.api = InsertExploreAppListApi() - def test_app_not_found(self, mocker, 
mock_admin_auth, mock_console_payload): + def test_app_not_found(self, mocker: MockerFixture, mock_admin_auth, mock_console_payload): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -281,7 +282,7 @@ class TestInsertExploreAppListApi: def test_create_recommended_app( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, ): @@ -318,7 +319,9 @@ class TestInsertExploreAppListApi: assert response["result"] == "success" assert mock_app.is_public is True - def test_update_recommended_app(self, mocker, mock_admin_auth, mock_console_payload, mock_session_factory): + def test_update_recommended_app( + self, mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory + ): mock_app = Mock(spec=App) mock_app.id = "app-id" mock_app.site = None @@ -344,7 +347,7 @@ class TestInsertExploreAppListApi: def test_site_data_overrides_payload( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -381,7 +384,7 @@ class TestInsertExploreAppListApi: def test_create_trial_app_when_can_trial_enabled( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -413,7 +416,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_with_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -450,7 +453,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_without_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, diff --git a/api/tests/unit_tests/controllers/console/test_feature.py b/api/tests/unit_tests/controllers/console/test_feature.py index d8debc1f2c..1711aede61 100644 --- a/api/tests/unit_tests/controllers/console/test_feature.py +++ b/api/tests/unit_tests/controllers/console/test_feature.py @@ -1,3 +1,4 @@ +from pytest_mock import 
MockerFixture from werkzeug.exceptions import Unauthorized @@ -11,7 +12,7 @@ def unwrap(func): class TestFeatureApi: - def test_get_tenant_features_success(self, mocker): + def test_get_tenant_features_success(self, mocker: MockerFixture): from controllers.console.feature import FeatureApi mocker.patch( @@ -32,7 +33,7 @@ class TestFeatureApi: class TestSystemFeatureApi: - def test_get_system_features_authenticated(self, mocker): + def test_get_system_features_authenticated(self, mocker: MockerFixture): """ current_user.is_authenticated == True """ @@ -56,7 +57,7 @@ class TestSystemFeatureApi: assert result == {"features": {"sys_feature": True}} - def test_get_system_features_unauthenticated(self, mocker): + def test_get_system_features_unauthenticated(self, mocker: MockerFixture): """ current_user.is_authenticated raises Unauthorized """ diff --git a/api/tests/unit_tests/controllers/console/workspace/test_models.py b/api/tests/unit_tests/controllers/console/workspace/test_models.py index 4246e3c04c..3c4acbab44 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_models.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_models.py @@ -32,7 +32,7 @@ class TestDefaultModelApi: with ( app.test_request_context( "/", - query_string={"model_type": ModelType.LLM.value}, + query_string={"model_type": ModelType.LLM}, ), patch( "controllers.console.workspace.models.current_account_with_tenant", @@ -53,7 +53,7 @@ class TestDefaultModelApi: payload = { "model_settings": [ { - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "provider": "openai", "model": "gpt-4", } @@ -77,7 +77,7 @@ class TestDefaultModelApi: method = unwrap(api.get) with ( - app.test_request_context("/", query_string={"model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), 
patch("controllers.console.workspace.models.ModelProviderService") as service, ): @@ -113,7 +113,7 @@ class TestModelProviderModelApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "load_balancing": { "configs": [{"weight": 1}], "enabled": True, @@ -139,7 +139,7 @@ class TestModelProviderModelApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -180,7 +180,7 @@ class TestModelProviderModelCredentialApi: "/", query_string={ "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, }, ), patch( @@ -208,7 +208,7 @@ class TestModelProviderModelCredentialApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -229,7 +229,7 @@ class TestModelProviderModelCredentialApi: method = unwrap(api.get) with ( - app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), patch("controllers.console.workspace.models.ModelProviderService") as service, patch("controllers.console.workspace.models.ModelLoadBalancingService") as lb, @@ -248,7 +248,7 @@ class TestModelProviderModelCredentialApi: payload = { "model": "gpt", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credential_id": "123e4567-e89b-12d3-a456-426614174000", } @@ -269,7 +269,7 @@ class TestModelProviderModelCredentialSwitchApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credential_id": "abc", } @@ -293,7 +293,7 @@ class TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -314,7 +314,7 @@ class 
TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -337,7 +337,7 @@ class TestModelProviderModelValidateApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -360,7 +360,7 @@ class TestModelProviderModelValidateApi: payload = { "model": model_name, - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {}, } @@ -412,7 +412,7 @@ class TestParameterAndAvailableModels: ): service_mock.return_value.get_models_by_model_type.return_value = [] - result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert "data" in result @@ -442,6 +442,6 @@ class TestParameterAndAvailableModels: ): service.return_value.get_models_by_model_type.return_value = [] - result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert result["data"] == [] diff --git a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py index d1b09c3a58..598677faff 100644 --- a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py +++ b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py @@ -189,7 +189,7 @@ class TestGetUserTenant: """Test get_user_tenant decorator""" @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch): + def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch): """Test that decorator injects tenant_model and user_model into kwargs""" # Arrange @@ -244,7 +244,9 @@ class TestGetUserTenant: protected_view() @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_use_default_session_id_when_user_id_empty(self, mock_tenant_class, app: Flask, monkeypatch): + def 
test_should_use_default_session_id_when_user_id_empty( + self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch + ): """Test that default session ID is used when user_id is empty string""" # Arrange diff --git a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py index 6dc8f54d42..74c13d50f6 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py @@ -340,7 +340,7 @@ class TestConversationAppModeValidation: @pytest.mark.parametrize( "mode", [ - AppMode.CHAT.value, + AppMode.CHAT, AppMode.AGENT_CHAT.value, AppMode.ADVANCED_CHAT.value, ], @@ -365,7 +365,7 @@ class TestConversationAppModeValidation: app raises NotChatAppError. """ app = Mock(spec=App) - app.mode = AppMode.COMPLETION.value + app.mode = AppMode.COMPLETION app_mode = AppMode.value_of(app.mode) assert app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT} @@ -498,7 +498,7 @@ class TestConversationApiController: def test_list_not_chat(self, app) -> None: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations", method="GET"): @@ -531,7 +531,7 @@ class TestConversationApiController: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -546,7 +546,7 @@ class TestConversationDetailApiController: def test_delete_not_chat(self, app) -> None: api = ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() 
with app.test_request_context("/conversations/1", method="DELETE"): @@ -562,7 +562,7 @@ class TestConversationDetailApiController: api = ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context("/conversations/1", method="DELETE"): @@ -580,7 +580,7 @@ class TestConversationRenameApiController: api = ConversationRenameApi() handler = _unwrap(api.post) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -596,7 +596,7 @@ class TestConversationVariablesApiController: def test_not_chat(self, app) -> None: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations/1/variables", method="GET"): @@ -612,7 +612,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -645,7 +645,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -671,7 +671,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -697,7 +697,7 @@ class 
TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -731,7 +731,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( diff --git a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py index 3cc444e467..9c310a4f45 100644 --- a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py +++ b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py @@ -3,6 +3,7 @@ from unittest.mock import Mock from uuid import UUID, uuid4 import pytest +from pytest_mock import MockerFixture from controllers.service_api.end_user.end_user import EndUserApi from controllers.service_api.end_user.error import EndUserNotFoundError @@ -21,7 +22,9 @@ class TestEndUserApi: app.tenant_id = str(uuid4()) return app - def test_get_end_user_returns_all_attributes(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_returns_all_attributes( + self, mocker: MockerFixture, resource: EndUserApi, app_model: App + ) -> None: end_user = Mock(spec=EndUser) end_user.id = str(uuid4()) end_user.tenant_id = app_model.tenant_id @@ -54,7 +57,7 @@ class TestEndUserApi: assert result["created_at"].startswith("2024-01-01T00:00:00") assert result["updated_at"].startswith("2024-01-02T00:00:00") - def test_get_end_user_not_found(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_not_found(self, mocker: MockerFixture, resource: EndUserApi, app_model: App) -> None: 
mocker.patch("controllers.service_api.end_user.end_user.EndUserService.get_end_user_by_id", return_value=None) with pytest.raises(EndUserNotFoundError): diff --git a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py index 9073ae1044..c1a4da8cd3 100644 --- a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py +++ b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py @@ -12,12 +12,13 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.output_parser.cot_output_parser import CotAgentOutputParser @pytest.fixture -def mock_action_class(mocker): +def mock_action_class(mocker: MockerFixture): mock_action = MagicMock() mocker.patch( "core.agent.output_parser.cot_output_parser.AgentScratchpadUnit.Action", diff --git a/api/tests/unit_tests/core/agent/strategy/test_plugin.py b/api/tests/unit_tests/core/agent/strategy/test_plugin.py index e0894f1e90..0fea04845d 100644 --- a/api/tests/unit_tests/core/agent/strategy/test_plugin.py +++ b/api/tests/unit_tests/core/agent/strategy/test_plugin.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.strategy.plugin import PluginAgentStrategy @@ -213,7 +214,9 @@ class TestInvoke: (None, None, "msg"), ], ) - def test_invoke_optional_arguments(self, strategy, mocker, conversation_id, app_id, message_id) -> None: + def test_invoke_optional_arguments( + self, strategy, mocker: MockerFixture, conversation_id, app_id, message_id + ) -> None: mock_manager = MagicMock() mock_manager.invoke = MagicMock(return_value=iter([])) diff --git a/api/tests/unit_tests/core/agent/test_base_agent_runner.py b/api/tests/unit_tests/core/agent/test_base_agent_runner.py index db4b293b16..d5fb853ee3 100644 --- a/api/tests/unit_tests/core/agent/test_base_agent_runner.py +++ 
b/api/tests/unit_tests/core/agent/test_base_agent_runner.py @@ -3,6 +3,7 @@ from decimal import Decimal from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.agent.base_agent_runner as module from core.agent.base_agent_runner import BaseAgentRunner @@ -13,7 +14,7 @@ from core.agent.base_agent_runner import BaseAgentRunner @pytest.fixture -def mock_db_session(mocker): +def mock_db_session(mocker: MockerFixture): session = mocker.MagicMock() mocker.patch.object(module.db, "session", session) return session @@ -41,13 +42,13 @@ def runner(mocker, mock_db_session): class TestRepack: - def test_sets_empty_if_none(self, runner, mocker): + def test_sets_empty_if_none(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = None result = runner._repack_app_generate_entity(entity) assert result.app_config.prompt_template.simple_prompt_template == "" - def test_keeps_existing(self, runner, mocker): + def test_keeps_existing(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = "abc" result = runner._repack_app_generate_entity(entity) @@ -60,7 +61,7 @@ class TestRepack: class TestUpdatePromptTool: - def build_param(self, mocker, **kwargs): + def build_param(self, mocker: MockerFixture, **kwargs): p = mocker.MagicMock() p.form = kwargs.get("form") @@ -75,7 +76,7 @@ class TestUpdatePromptTool: p.required = kwargs.get("required", False) return p - def test_skip_non_llm(self, runner, mocker): + def test_skip_non_llm(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form="NOT_LLM") tool.get_runtime_parameters.return_value = [param] @@ -86,7 +87,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_enum_and_required(self, runner, mocker): + def 
test_enum_and_required(self, runner, mocker: MockerFixture): option = mocker.MagicMock(value="opt1") param = self.build_param( mocker, @@ -104,7 +105,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert "p1" in result.parameters["required"] - def test_skip_file_type_param(self, runner, mocker): + def test_skip_file_type_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form=module.ToolParameter.ToolParameterForm.LLM) param.type = module.ToolParameter.ToolParameterType.FILE @@ -116,7 +117,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_duplicate_required_not_duplicated(self, runner, mocker): + def test_duplicate_required_not_duplicated(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param( @@ -141,7 +142,7 @@ class TestUpdatePromptTool: class TestCreateAgentThought: - def test_with_files(self, runner, mock_db_session, mocker): + def test_with_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = mocker.MagicMock(id=10) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -149,7 +150,7 @@ class TestCreateAgentThought: assert result == "10" assert runner.agent_thought_count == 1 - def test_without_files(self, runner, mock_db_session, mocker): + def test_without_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = mocker.MagicMock(id=11) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -163,7 +164,7 @@ class TestCreateAgentThought: class TestSaveAgentThought: - def setup_agent(self, mocker): + def setup_agent(self, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;tool2" agent.tool_labels = {} @@ -175,7 +176,7 @@ class TestSaveAgentThought: with pytest.raises(ValueError): runner.save_agent_thought("id", None, 
None, None, None, None, None, [], None) - def test_full_update(self, runner, mock_db_session, mocker): + def test_full_update(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -210,7 +211,7 @@ class TestSaveAgentThought: assert agent.tokens == 3 assert "tool1" in json.loads(agent.tool_labels_str) - def test_label_fallback_when_none(self, runner, mock_db_session, mocker): + def test_label_fallback_when_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) agent.tool = "unknown_tool" mock_db_session.scalar.return_value = agent @@ -220,7 +221,7 @@ class TestSaveAgentThought: labels = json.loads(agent.tool_labels_str) assert "unknown_tool" in labels - def test_json_failure_paths(self, runner, mock_db_session, mocker): + def test_json_failure_paths(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -241,13 +242,13 @@ class TestSaveAgentThought: assert mock_db_session.commit.called - def test_messages_ids_none(self, runner, mock_db_session, mocker): + def test_messages_ids_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent runner.save_agent_thought("id", None, None, None, None, None, None, None, None) assert mock_db_session.commit.called - def test_success_dict_serialization(self, runner, mock_db_session, mocker): + def test_success_dict_serialization(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -273,19 +274,19 @@ class TestSaveAgentThought: class TestOrganizeUserPrompt: - def test_no_files(self, runner, mock_db_session, mocker): + def test_no_files(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [] msg = mocker.MagicMock(id="1", 
query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_with_files_no_config(self, runner, mock_db_session, mocker): + def test_with_files_no_config(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] msg = mocker.MagicMock(id="1", query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_image_detail_low_fallback(self, runner, mock_db_session, mocker): + def test_image_detail_low_fallback(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() file_config.image_config = mocker.MagicMock(detail=None) @@ -305,27 +306,27 @@ class TestOrganizeUserPrompt: class TestOrganizeHistory: - def test_empty(self, runner, mock_db_session, mocker): + def test_empty(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) result = runner.organize_agent_history([]) assert result == [] - def test_with_answer_only(self, runner, mock_db_session, mocker): + def test_with_answer_only(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="m1", answer="ans", agent_thoughts=[], app_model_config=None) mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert any(isinstance(x, module.AssistantPromptMessage) for x in result) - def test_skip_current_message(self, runner, mock_db_session, mocker): + def test_skip_current_message(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="msg_current", agent_thoughts=[], 
answer="ans", app_model_config=None) mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert result == [] - def test_with_tool_calls_invalid_json(self, runner, mock_db_session, mocker): + def test_with_tool_calls_invalid_json(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input="invalid", @@ -341,7 +342,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_empty_tool_name_split(self, runner, mock_db_session, mocker): + def test_empty_tool_name_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=";", thought="thinking") msg = mocker.MagicMock(id="m5", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -350,7 +351,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_valid_json_tool_flow(self, runner, mock_db_session, mocker): + def test_valid_json_tool_flow(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=json.dumps({"tool1": {"x": 1}}), @@ -379,7 +380,7 @@ class TestOrganizeHistory: class TestConvertToolToPromptMessageTool: - def test_basic_conversion(self, runner, mocker): + def test_basic_conversion(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") runtime_param = mocker.MagicMock() @@ -404,7 +405,7 @@ class TestConvertToolToPromptMessageTool: prompt_tool, entity = runner._convert_tool_to_prompt_message_tool(tool) assert entity == tool_entity - def test_full_conversion_multiple_params(self, runner, mocker): + def test_full_conversion_multiple_params(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") # LLM param with input_schema override @@ -441,7 +442,7 @@ class 
TestConvertToolToPromptMessageTool: class TestInitPromptToolsExtended: - def test_agent_tool_branch(self, runner, mocker): + def test_agent_tool_branch(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="agent_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) mocker.patch.object(runner, "_convert_tool_to_prompt_message_tool", return_value=(MagicMock(), "entity")) @@ -449,7 +450,7 @@ class TestInitPromptToolsExtended: tools, prompts = runner._init_prompt_tools() assert "agent_tool" in tools - def test_exception_in_conversion(self, runner, mocker): + def test_exception_in_conversion(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="bad_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) mocker.patch.object(runner, "_convert_tool_to_prompt_message_tool", side_effect=Exception) @@ -464,7 +465,7 @@ class TestInitPromptToolsExtended: class TestAdditionalCoverage: - def test_update_prompt_with_input_schema(self, runner, mocker): + def test_update_prompt_with_input_schema(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = mocker.MagicMock() @@ -487,7 +488,7 @@ class TestAdditionalCoverage: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"]["p1"]["type"] == "number" - def test_save_agent_thought_existing_labels(self, runner, mock_db_session, mocker): + def test_save_agent_thought_existing_labels(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {"tool1": {"en_US": "existing"}} @@ -498,7 +499,7 @@ class TestAdditionalCoverage: labels = json.loads(agent.tool_labels_str) assert labels["tool1"]["en_US"] == "existing" - def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker): + def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = 
"tool1" agent.tool_labels = {} @@ -508,7 +509,7 @@ class TestAdditionalCoverage: runner.save_agent_thought("id", None, None, None, None, "meta_string", None, [], None) assert agent.tool_meta_str == "meta_string" - def test_convert_dataset_retriever_tool(self, runner, mocker): + def test_convert_dataset_retriever_tool(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -525,7 +526,7 @@ class TestAdditionalCoverage: prompt = runner._convert_dataset_retriever_tool_to_prompt_message_tool(ds_tool) assert prompt is not None - def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker): + def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() @@ -544,7 +545,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_user_prompt(msg) assert result is not None - def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker): + def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=None, thought="thinking") msg = mocker.MagicMock(id="m3", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -554,7 +555,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker): + def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1;tool2", tool_input=json.dumps({"tool1": {}, "tool2": {}}), @@ -572,7 +573,7 @@ class TestAdditionalCoverage: # ================= Additional Surgical Coverage ================= - def test_convert_tool_select_enum_branch(self, runner, mocker): + def 
test_convert_tool_select_enum_branch(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -599,7 +600,7 @@ class TestAdditionalCoverage: class TestConvertDatasetRetrieverTool: - def test_required_param_added(self, runner, mocker): + def test_required_param_added(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -619,7 +620,7 @@ class TestConvertDatasetRetrieverTool: class TestBaseAgentRunnerInit: - def test_init_sets_stream_tool_call_and_files(self, mocker): + def test_init_sets_stream_tool_call_and_files(self, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = 2 mocker.patch.object(module.db, "session", session) @@ -662,7 +663,7 @@ class TestBaseAgentRunnerInit: class TestBaseAgentRunnerCoverage: - def test_convert_tool_skips_non_llm_param(self, runner, mocker): + def test_convert_tool_skips_non_llm_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -680,7 +681,7 @@ class TestBaseAgentRunnerCoverage: assert prompt_tool.parameters["properties"] == {} - def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker): + def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker: MockerFixture): dataset_tool = mocker.MagicMock() dataset_tool.entity.identity.name = "ds" runner.dataset_tools = [dataset_tool] @@ -692,7 +693,7 @@ class TestBaseAgentRunnerCoverage: assert tools["ds"] == dataset_tool assert len(prompt_tools) == 1 - def test_update_prompt_message_tool_select_enum(self, runner, mocker): + def test_update_prompt_message_tool_select_enum(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() option1 = mocker.MagicMock(value="A") @@ -716,7 +717,7 @@ class TestBaseAgentRunnerCoverage: assert result.parameters["properties"]["select_param"]["enum"] == ["A", "B"] - def 
test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker): + def test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {} @@ -754,7 +755,7 @@ class TestBaseAgentRunnerCoverage: assert isinstance(agent.observation, str) assert isinstance(agent.tool_meta_str, str) - def test_save_agent_thought_skips_empty_tool_name(self, runner, mock_db_session, mocker): + def test_save_agent_thought_skips_empty_tool_name(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;;" agent.tool_labels = {} @@ -768,7 +769,7 @@ class TestBaseAgentRunnerCoverage: labels = json.loads(agent.tool_labels_str) assert "" not in labels - def test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker): + def test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) @@ -778,7 +779,7 @@ class TestBaseAgentRunnerCoverage: assert system_message in result - def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker): + def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=None, diff --git a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py index cde8820e00..314305d371 100644 --- a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py @@ -2,6 +2,7 @@ import json from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.cot_agent_runner import CotAgentRunner from core.agent.entities 
import AgentScratchpadUnit @@ -25,7 +26,7 @@ class DummyRunner(CotAgentRunner): @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Prevent BaseAgentRunner __init__ from hitting database mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.organize_agent_history", @@ -165,7 +166,7 @@ class TestHandleInvokeAction: response, meta = runner._handle_invoke_action(action, {}, []) assert "there is not a tool named" in response - def test_tool_with_json_string_args(self, runner, mocker): + def test_tool_with_json_string_args(self, runner, mocker: MockerFixture): action = AgentScratchpadUnit.Action(action_name="tool", action_input=json.dumps({"a": 1})) tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -180,7 +181,7 @@ class TestHandleInvokeAction: class TestOrganizeHistoricPromptMessages: - def test_empty_history(self, runner, mocker): + def test_empty_history(self, runner, mocker: MockerFixture): mocker.patch( "core.agent.cot_agent_runner.AgentHistoryPromptTransform.get_prompt", return_value=[], @@ -190,7 +191,7 @@ class TestOrganizeHistoricPromptMessages: class TestRun: - def test_run_handles_empty_parser_output(self, runner, mocker): + def test_run_handles_empty_parser_output(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -202,7 +203,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert isinstance(results, list) - def test_run_with_action_and_tool_invocation(self, runner, mocker): + def test_run_with_action_and_tool_invocation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -223,7 +224,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_respects_max_iteration_boundary(self, runner, mocker): + def test_run_respects_max_iteration_boundary(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 1 message = MagicMock() 
message.id = "msg-id" @@ -245,7 +246,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_basic_flow(self, runner, mocker): + def test_run_basic_flow(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -257,7 +258,7 @@ class TestRun: results = list(runner.run(message, "query", {"name": "John"})) assert results - def test_run_max_iteration_error(self, runner, mocker): + def test_run_max_iteration_error(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 0 message = MagicMock() message.id = "msg-id" @@ -272,7 +273,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {})) - def test_run_increase_usage_aggregation(self, runner, mocker): + def test_run_increase_usage_aggregation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" runner.app_config.agent.max_iteration = 2 @@ -329,7 +330,7 @@ class TestRun: assert final_usage.completion_price == 2 assert final_usage.total_price == 4 - def test_run_when_no_action_branch(self, runner, mocker): + def test_run_when_no_action_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -341,7 +342,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "" - def test_run_usage_missing_key_branch(self, runner, mocker): + def test_run_usage_missing_key_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -354,7 +355,7 @@ class TestRun: list(runner.run(message, "query", {})) - def test_run_prompt_tool_update_branch(self, runner, mocker): + def test_run_prompt_tool_update_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -410,7 +411,7 @@ class TestRun: class TestInitReactState: - def test_init_react_state_resets_state(self, runner, mocker): + def 
test_init_react_state_resets_state(self, runner, mocker: MockerFixture): mocker.patch.object(runner, "_organize_historic_prompt_messages", return_value=["historic"]) runner._agent_scratchpad = ["old"] runner._query = "old" @@ -423,7 +424,7 @@ class TestInitReactState: class TestHandleInvokeActionExtended: - def test_tool_with_invalid_json_string_args(self, runner, mocker): + def test_tool_with_invalid_json_string_args(self, runner, mocker: MockerFixture): action = AgentScratchpadUnit.Action(action_name="tool", action_input="not-json") tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -457,7 +458,7 @@ class TestFillInputsEdgeCases: class TestOrganizeHistoricPromptMessagesExtended: - def test_user_message_flushes_scratchpad(self, runner, mocker): + def test_user_message_flushes_scratchpad(self, runner, mocker: MockerFixture): from graphon.model_runtime.entities.message_entities import UserPromptMessage user_message = UserPromptMessage(content="Hi") @@ -480,7 +481,7 @@ class TestOrganizeHistoricPromptMessagesExtended: with pytest.raises(NotImplementedError): runner._organize_historic_prompt_messages([]) - def test_agent_history_transform_invocation(self, runner, mocker): + def test_agent_history_transform_invocation(self, runner, mocker: MockerFixture): mock_transform = MagicMock() mock_transform.get_prompt.return_value = [] @@ -495,7 +496,7 @@ class TestOrganizeHistoricPromptMessagesExtended: class TestRunAdditionalBranches: - def test_run_with_no_action_final_answer_empty(self, runner, mocker): + def test_run_with_no_action_final_answer_empty(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -507,7 +508,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert any(hasattr(r, "delta") for r in results) - def test_run_with_final_answer_action_string(self, runner, mocker): + def test_run_with_final_answer_action_string(self, runner, mocker: MockerFixture): message = MagicMock() 
message.id = "msg-id" @@ -521,7 +522,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "done" - def test_run_with_final_answer_action_dict(self, runner, mocker): + def test_run_with_final_answer_action_dict(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -535,7 +536,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert json.loads(results[-1].delta.message.content) == {"a": 1} - def test_run_with_string_final_answer(self, runner, mocker): + def test_run_with_string_final_answer(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" diff --git a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py index ea8cc8aa86..8e7093fd12 100644 --- a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from pytest_mock import MockerFixture from core.agent.cot_chat_agent_runner import CotChatAgentRunner from graphon.model_runtime.entities.message_entities import TextPromptMessageContent @@ -55,7 +56,7 @@ def runner(): class TestOrganizeSystemPrompt: - def test_organize_system_prompt_success(self, runner, mocker): + def test_organize_system_prompt_success(self, runner, mocker: MockerFixture): first_prompt = "Instruction: {{instruction}}, Tools: {{tools}}, Names: {{tool_names}}" runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt(first_prompt))) @@ -154,7 +155,7 @@ class TestOrganizeUserQuery: class TestOrganizePromptMessages: - def test_no_scratchpad(self, runner, mocker): + def test_no_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = 
MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -164,7 +165,7 @@ class TestOrganizePromptMessages: assert "query" in result runner._organize_historic_prompt_messages.assert_called_once() - def test_with_final_scratchpad(self, runner, mocker): + def test_with_final_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -177,7 +178,7 @@ class TestOrganizePromptMessages: combined = "".join([m.content for m in assistant_msgs if isinstance(m.content, str)]) assert "Final Answer: done" in combined - def test_with_thought_action_observation(self, runner, mocker): + def test_with_thought_action_observation(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -197,7 +198,7 @@ class TestOrganizePromptMessages: assert "Action: action" in combined assert "Observation: observe" in combined - def test_multiple_units_mixed(self, runner, mocker): + def test_multiple_units_mixed(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) diff --git a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py index 2f5873d865..0d949c357d 100644 --- a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import 
MockerFixture from core.agent.cot_completion_agent_runner import CotCompletionAgentRunner from graphon.model_runtime.entities.message_entities import ( @@ -74,7 +75,7 @@ class TestOrganizeInstructionPrompt: class TestOrganizeHistoricPrompt: - def test_with_user_and_assistant_string(self, runner, mocker): + def test_with_user_and_assistant_string(self, runner, mocker: MockerFixture): user_msg = UserPromptMessage(content="Hello") assistant_msg = AssistantPromptMessage(content="Hi there") @@ -89,7 +90,7 @@ class TestOrganizeHistoricPrompt: assert "Question: Hello" in result assert "Hi there" in result - def test_assistant_list_with_text_content(self, runner, mocker): + def test_assistant_list_with_text_content(self, runner, mocker: MockerFixture): text_content = TextPromptMessageContent(data="Partial answer") assistant_msg = AssistantPromptMessage(content=[text_content]) @@ -103,7 +104,7 @@ class TestOrganizeHistoricPrompt: assert "Partial answer" in result - def test_assistant_list_with_non_text_content_ignored(self, runner, mocker): + def test_assistant_list_with_non_text_content_ignored(self, runner, mocker: MockerFixture): non_text_content = ImagePromptMessageContent(format="url", mime_type="image/png") assistant_msg = AssistantPromptMessage(content=[non_text_content]) @@ -116,7 +117,7 @@ class TestOrganizeHistoricPrompt: result = runner._organize_historic_prompt() assert result == "" - def test_empty_history(self, runner, mocker): + def test_empty_history(self, runner, mocker: MockerFixture): mocker.patch.object( runner, "_organize_historic_prompt_messages", @@ -136,7 +137,7 @@ class TestOrganizePromptMessages: def test_full_flow_with_scratchpad( self, runner, - mocker, + mocker: MockerFixture, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory, @@ -171,7 +172,12 @@ class TestOrganizePromptMessages: assert "Question: What is Python?" 
in content def test_no_scratchpad( - self, runner, mocker, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory + self, + runner, + mocker: MockerFixture, + dummy_app_config_factory, + dummy_agent_config_factory, + dummy_prompt_entity_factory, ): template = "SYS {{historic_messages}} {{agent_scratchpad}} {{query}}" @@ -198,7 +204,7 @@ class TestOrganizePromptMessages: def test_partial_scratchpad_units( self, runner, - mocker, + mocker: MockerFixture, thought, action, observation, diff --git a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py index 17ab5babcb..3a4347e723 100644 --- a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py @@ -3,6 +3,7 @@ from typing import Any from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.errors import AgentMaxIterationError from core.agent.fc_agent_runner import FunctionCallAgentRunner @@ -68,7 +69,7 @@ class DummyResult: @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Completely bypass BaseAgentRunner __init__ to avoid DB / Flask context mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.__init__", @@ -230,7 +231,7 @@ class TestOrganizeUserQuery: result = runner._organize_user_query(None, []) assert len(result) == 1 - def test_with_files_uses_image_detail_config(self, runner, mocker): + def test_with_files_uses_image_detail_config(self, runner, mocker: MockerFixture): file_content = TextPromptMessageContent(data="file-content") mock_to_prompt = mocker.patch( "core.agent.fc_agent_runner.file_manager.to_prompt_message_content", @@ -352,7 +353,7 @@ class TestRunMethod: assert len(outputs) == 1 assert runner.save_agent_thought.call_args.kwargs["thought"] == "hi" - def test_run_streaming_tool_call_inputs_type_error(self, runner, mocker): + def 
test_run_streaming_tool_call_inputs_type_error(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") runner.stream_tool_call = True @@ -398,7 +399,7 @@ class TestRunMethod: outputs = list(runner.run(message, "query")) assert len(outputs) >= 1 - def test_run_with_tool_instance_and_files(self, runner, mocker): + def test_run_with_tool_instance_and_files(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") tool_call = MagicMock() diff --git a/api/tests/unit_tests/core/agent/test_plugin_entities.py b/api/tests/unit_tests/core/agent/test_plugin_entities.py index 9955190aca..aa3098a2a1 100644 --- a/api/tests/unit_tests/core/agent/test_plugin_entities.py +++ b/api/tests/unit_tests/core/agent/test_plugin_entities.py @@ -9,6 +9,7 @@ mocking; ensure entity invariants and validation rules remain stable. import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.agent.plugin_entities import ( AgentFeature, @@ -28,12 +29,12 @@ from core.tools.entities.tool_entities import ToolIdentity, ToolProviderIdentity @pytest.fixture -def mock_identity(mocker): +def mock_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyIdentity) @pytest.fixture -def mock_provider_identity(mocker): +def mock_provider_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyProviderIdentity) @@ -47,7 +48,7 @@ class TestAgentStrategyParameterType: "enum_member", list(AgentStrategyParameter.AgentStrategyParameterType), ) - def test_as_normal_type_calls_external_function(self, mocker, enum_member) -> None: + def test_as_normal_type_calls_external_function(self, mocker: MockerFixture, enum_member) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.as_normal_type", return_value="normalized", @@ -58,7 +59,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member) assert result == "normalized" - def test_as_normal_type_propagates_exception(self, mocker) -> None: 
+ def test_as_normal_type_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.as_normal_type", @@ -79,7 +80,7 @@ class TestAgentStrategyParameterType: (AgentStrategyParameter.AgentStrategyParameterType.FILES, []), ], ) - def test_cast_value_calls_external_function(self, mocker, enum_member, value) -> None: + def test_cast_value_calls_external_function(self, mocker: MockerFixture, enum_member, value) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.cast_parameter_value", return_value="casted", @@ -90,7 +91,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member, value) assert result == "casted" - def test_cast_value_propagates_exception(self, mocker) -> None: + def test_cast_value_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.cast_parameter_value", @@ -136,7 +137,7 @@ class TestAgentStrategyParameter: assert any(error["loc"] == ("type",) for error in exc_info.value.errors()) - def test_init_frontend_parameter_calls_external(self, mocker) -> None: + def test_init_frontend_parameter_calls_external(self, mocker: MockerFixture) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", return_value="frontend", @@ -153,7 +154,7 @@ class TestAgentStrategyParameter: mock_func.assert_called_once_with(param, param.type, "value") assert result == "frontend" - def test_init_frontend_parameter_propagates_exception(self, mocker) -> None: + def test_init_frontend_parameter_propagates_exception(self, mocker: MockerFixture) -> None: mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", side_effect=RuntimeError("error"), diff --git a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py 
b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py index 1c5b6ed944..6dbf301f65 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py @@ -10,7 +10,7 @@ class TestGetParametersFromFeatureDict: """Test suite for get_parameters_from_feature_dict""" @pytest.fixture - def mock_config(self, monkeypatch): + def mock_config(self, monkeypatch: pytest.MonkeyPatch): """Mock dify_config values""" mock = MagicMock() mock.UPLOAD_IMAGE_FILE_SIZE_LIMIT = 1 @@ -23,7 +23,7 @@ class TestGetParametersFromFeatureDict: return mock @pytest.fixture - def mock_default_file_limits(self, monkeypatch): + def mock_default_file_limits(self, monkeypatch: pytest.MonkeyPatch): """Mock DEFAULT_FILE_NUMBER_LIMITS constant""" monkeypatch.setattr(parameters_mapping, "DEFAULT_FILE_NUMBER_LIMITS", 99) return 99 diff --git a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py index 013ed0cbc4..bd4ca5ff85 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.common.sensitive_word_avoidance.manager import ( SensitiveWordAvoidanceConfigManager, @@ -26,7 +27,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result is None - def test_convert_returns_entity_when_enabled(self, mocker): + def test_convert_returns_entity_when_enabled(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() mocker.patch( @@ -48,7 +49,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result == mock_entity - def 
test_convert_enabled_without_type_or_config(self, mocker): + def test_convert_enabled_without_type_or_config(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() patched = mocker.patch( @@ -135,7 +136,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: with pytest.raises(ValueError, match="must be a dict"): SensitiveWordAvoidanceConfigManager.validate_and_set_defaults(tenant_id="tenant1", config=config) - def test_validate_calls_moderation_factory(self, mocker): + def test_validate_calls_moderation_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -159,7 +160,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: assert result_config["sensitive_word_avoidance"]["enabled"] is True assert fields == ["sensitive_word_avoidance"] - def test_validate_sets_empty_dict_when_config_none(self, mocker): + def test_validate_sets_empty_dict_when_config_none(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -179,7 +180,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: # Assert mock_validate.assert_called_once_with(name="mock_type", tenant_id="tenant1", config={}) - def test_validate_only_structure_validate_skips_factory(self, mocker): + def test_validate_only_structure_validate_skips_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py index 992b580376..359b04070b 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py +++ 
b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.agent.manager import AgentConfigManager @@ -84,7 +85,7 @@ class TestAgentConfigManagerConvert: assert result.strategy.name == "CHAIN_OF_THOUGHT" - def test_convert_skips_disabled_tools(self, mocker, base_config): + def test_convert_skips_disabled_tools(self, mocker: MockerFixture, base_config): # Patch AgentEntity to bypass pydantic validation mock_agent_entity = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentEntity", @@ -128,7 +129,7 @@ class TestAgentConfigManagerConvert: mock_validate.assert_called_once() mock_agent_entity.assert_called_once() - def test_convert_tool_requires_minimum_keys(self, mocker, base_config): + def test_convert_tool_requires_minimum_keys(self, mocker: MockerFixture, base_config): mock_validate = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentToolEntity.model_validate", return_value=MagicMock(), diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py index a688e2a5c5..3a239eac0e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py @@ -2,6 +2,7 @@ import uuid from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.dataset.manager import DatasetConfigManager from core.entities.agent_entities import PlanningStrategy @@ -69,7 +70,7 @@ class TestDatasetConfigManagerConvert: assert result.dataset_ids == [valid_uuid] assert result.retrieve_config.query_variable == "query" - def test_convert_single_with_metadata_configs(self, valid_uuid, 
mocker): + def test_convert_single_with_metadata_configs(self, valid_uuid, mocker: MockerFixture): mock_retrieve_config = MagicMock() mock_entity = MagicMock() mock_entity.dataset_ids = [valid_uuid] @@ -258,7 +259,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_invalid_uuid(self, mocker): + def test_extract_invalid_uuid(self, mocker: MockerFixture): invalid_uuid = "not-a-uuid" config = { "agent_mode": { @@ -270,7 +271,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_dataset_not_exists(self, valid_uuid, mocker): + def test_extract_dataset_not_exists(self, valid_uuid, mocker: MockerFixture): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, @@ -292,7 +293,7 @@ class TestExtractDatasetConfig: class TestIsDatasetExists: - def test_dataset_exists_true(self, mocker, valid_uuid): + def test_dataset_exists_true(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "tenant1" mocker.patch( @@ -302,14 +303,14 @@ class TestIsDatasetExists: assert DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_not_found(self, mocker, valid_uuid): + def test_dataset_exists_false_when_not_found(self, mocker: MockerFixture, valid_uuid): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, ) assert not DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_tenant_mismatch(self, mocker, valid_uuid): + def test_dataset_exists_false_when_tenant_mismatch(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "other" 
mocker.patch( diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py index 186b4a501d..e5b581b6a0 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter from core.entities.model_entities import ModelStatus @@ -16,7 +17,7 @@ from graphon.model_runtime.entities.model_entities import ModelPropertyKey class TestModelConfigConverter: @pytest.fixture(autouse=True) - def patch_response_entity(self, mocker): + def patch_response_entity(self, mocker: MockerFixture): """ Patch ModelConfigWithCredentialsEntity to bypass Pydantic validation and return a simple namespace object instead. 
@@ -69,7 +70,7 @@ class TestModelConfigConverter: return bundle @pytest.fixture - def patch_provider_manager(self, mocker, mock_provider_bundle): + def patch_provider_manager(self, mocker: MockerFixture, mock_provider_bundle): mock_manager = MagicMock() mock_manager.get_provider_model_bundle.return_value = mock_provider_bundle mocker.patch( @@ -99,7 +100,7 @@ class TestModelConfigConverter: assert result.parameters == {"temperature": 0.7} assert result.stop == ["\n"] - def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_app_config.model.mode = None mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { @@ -116,7 +117,9 @@ class TestModelConfigConverter: result = ModelConfigConverter.convert(mock_app_config) assert result.mode == LLMMode.COMPLETION - def test_convert_mode_from_schema_invalid_fallback(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_invalid_fallback( + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture + ): mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { ModelPropertyKey.MODE: "invalid" } @@ -135,7 +138,7 @@ class TestModelConfigConverter: # Credential Errors # ============================= - def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_current_credentials.return_value = None mock_manager = MagicMock() @@ -152,7 +155,7 @@ class TestModelConfigConverter: # Provider Model Errors # ============================= - def test_convert_provider_model_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_provider_model_none_raises(self, 
mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_provider_model.return_value = None mock_manager = MagicMock() @@ -174,7 +177,7 @@ class TestModelConfigConverter: ], ) def test_convert_provider_model_status_errors( - self, mock_app_config, mock_provider_bundle, mocker, status, expected_exception + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture, status, expected_exception ): mock_provider = MagicMock() mock_provider.status = status @@ -194,7 +197,7 @@ class TestModelConfigConverter: # Schema Errors # ============================= - def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.model_type_instance.get_model_schema.return_value = None mock_manager = MagicMock() diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py index 68bca485bb..72e334004e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture # Target from core.app.app_config.easy_ui_based_app.model_config.manager import ModelConfigManager @@ -107,7 +108,9 @@ class TestModelConfigManager: # validate_and_set_defaults # ========================================================== - def test_validate_and_set_defaults_success(self, mocker, valid_config, provider_entities, valid_model_list): + def test_validate_and_set_defaults_success( + self, mocker: MockerFixture, valid_config, provider_entities, valid_model_list + ): self._patch_model_assembly( mocker, provider_entities=provider_entities, 
@@ -127,35 +130,37 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="object type"): ModelConfigManager.validate_and_set_defaults("tenant1", {"model": "invalid"}) - def test_validate_and_set_defaults_missing_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_invalid_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "invalid/provider", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_missing_name(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_name(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.name is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_empty_models(self, mocker, provider_entities): + def test_validate_and_set_defaults_empty_models(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with 
pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_model_name(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_invalid_model_name( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "invalid", "completion_params": {}}} self._patch_model_assembly( mocker, @@ -166,7 +171,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_default_mode_when_missing(self, mocker, provider_entities): + def test_validate_and_set_defaults_default_mode_when_missing(self, mocker: MockerFixture, provider_entities): model = MagicMock() model.model = "gpt-4" model.model_properties = {} @@ -178,7 +183,9 @@ class TestModelConfigManager: assert updated_config["model"]["mode"] == "completion" - def test_validate_and_set_defaults_missing_completion_params(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_missing_completion_params( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "gpt-4"}} self._patch_model_assembly( mocker, @@ -189,7 +196,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="completion_params is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_provider_without_slash_converted(self, mocker, valid_model_list): + def test_validate_and_set_defaults_provider_without_slash_converted(self, mocker: MockerFixture, valid_model_list): """ Covers branch where provider does not contain '/' and ModelProviderID conversion is triggered (line 64). 
diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py index fd49072cd5..3fd21ab22b 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.prompt_template.manager import ( PromptTemplateConfigManager, @@ -38,7 +39,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError, match="prompt_type is required"): PromptTemplateConfigManager.convert({}) - def test_convert_simple_prompt(self, mocker): + def test_convert_simple_prompt(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -56,7 +57,7 @@ class TestPromptTemplateConfigManagerConvert: assert result == "simple_entity" mock_prompt_entity_cls.assert_called_once_with(prompt_type="simple", simple_prompt_template="hello") - def test_convert_advanced_chat_valid(self, mocker): + def test_convert_advanced_chat_valid(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -97,7 +98,7 @@ class TestPromptTemplateConfigManagerConvert: {"text": "hi", "role": 123}, ], ) - def test_convert_advanced_invalid_message_fields(self, mocker, message): + def test_convert_advanced_invalid_message_fields(self, mocker: MockerFixture, message): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -114,7 +115,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError): PromptTemplateConfigManager.convert(config) - def 
test_convert_advanced_completion_with_roles(self, mocker): + def test_convert_advanced_completion_with_roles(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -154,7 +155,7 @@ class TestValidateAndSetDefaults: def setup_method(self): self.valid_model = {"mode": "chat"} - def _patch_prompt_type(self, mocker): + def _patch_prompt_type(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mocker.patch( @@ -163,7 +164,7 @@ class TestValidateAndSetDefaults: ) return mock_prompt_entity_cls - def test_default_prompt_type_set(self, mocker): + def test_default_prompt_type_set(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = {"model": self.valid_model} @@ -173,7 +174,7 @@ class TestValidateAndSetDefaults: assert result["prompt_type"] == "simple" assert isinstance(keys, list) - def test_invalid_prompt_type_raises(self, mocker): + def test_invalid_prompt_type_raises(self, mocker: MockerFixture): class InvalidEnum(DummyPromptType): def __iter__(self): return iter([DummyEnumValue("valid")]) @@ -191,7 +192,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_invalid_chat_prompt_config_type(self, mocker): + def test_invalid_chat_prompt_config_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -203,7 +204,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_simple_mode_invalid_pre_prompt_type(self, mocker): + def test_simple_mode_invalid_pre_prompt_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -215,7 +216,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): 
PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_requires_one_config(self, mocker): + def test_advanced_requires_one_config(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -228,7 +229,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_invalid_model_mode(self, mocker): + def test_advanced_invalid_model_mode(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -240,7 +241,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_chat_prompt_length_exceeds(self, mocker): + def test_advanced_chat_prompt_length_exceeds(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -252,7 +253,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_completion_prefix_defaults_set_when_empty(self, mocker): + def test_completion_prefix_defaults_set_when_empty(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py index d9fe7004ff..b82417cfed 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.variables.manager import ( BasicVariablesConfigManager, @@ -15,7 +16,7 @@ class TestBasicVariablesConfigManagerConvert: assert variables == [] assert external == [] - def 
test_convert_external_data_tools_enabled_and_disabled(self, mocker): + def test_convert_external_data_tools_enabled_and_disabled(self, mocker: MockerFixture): config = { "external_data_tools": [ {"enabled": False}, @@ -232,7 +233,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_disabled_tool_skipped(self, mocker): + def test_validate_disabled_tool_skipped(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": False}]} spy = mocker.patch( @@ -250,7 +251,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_enabled_tool_calls_factory(self, mocker): + def test_validate_enabled_tool_calls_factory(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": True, "type": "tool", "config": {"a": 1}}]} spy = mocker.patch( @@ -263,7 +264,7 @@ class TestValidateExternalDataToolsAndSetDefaults: class TestValidateAndSetDefaultsIntegration: - def test_validate_and_set_defaults_calls_both(self, mocker): + def test_validate_and_set_defaults_calls_both(self, mocker: MockerFixture): config = {} spy_var = mocker.patch.object( diff --git a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py index e99852cf76..e2ab3e2192 100644 --- a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py @@ -2,6 +2,7 @@ from collections import UserDict from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.base_app_config_manager import BaseAppConfigManager @@ -12,7 +13,7 @@ class TestBaseAppConfigManager: return {"key": 
"value", "another": 123} @pytest.fixture - def mock_app_additional_features(self, mocker): + def mock_app_additional_features(self, mocker: MockerFixture): mock_instance = MagicMock() mocker.patch( "core.app.app_config.base_app_config_manager.AppAdditionalFeatures", @@ -21,7 +22,7 @@ class TestBaseAppConfigManager: return mock_instance @pytest.fixture - def mock_managers(self, mocker): + def mock_managers(self, mocker: MockerFixture): retrieval = mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", return_value="retrieval_result", @@ -72,7 +73,7 @@ class TestBaseAppConfigManager: ) def test_convert_features_all_modes( self, - mocker, + mocker: MockerFixture, mock_config_dict, mock_app_additional_features, mock_managers, @@ -107,7 +108,7 @@ class TestBaseAppConfigManager: mock_managers["speech_to_text"].assert_called_once_with(config=dict(mock_config_dict.items())) mock_managers["text_to_speech"].assert_called_once_with(config=dict(mock_config_dict.items())) - def test_convert_features_empty_config(self, mocker, mock_app_additional_features, mock_managers): + def test_convert_features_empty_config(self, mocker: MockerFixture, mock_app_additional_features, mock_managers): # Arrange empty_config = {} mock_app_mode = MagicMock() @@ -143,7 +144,7 @@ class TestBaseAppConfigManager: with pytest.raises((TypeError, AttributeError)): BaseAppConfigManager.convert_features(invalid_config, "CHAT") - def test_convert_features_manager_exception_propagates(self, mocker, mock_config_dict): + def test_convert_features_manager_exception_propagates(self, mocker: MockerFixture, mock_config_dict): # Arrange mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", @@ -154,7 +155,9 @@ class TestBaseAppConfigManager: with pytest.raises(RuntimeError): BaseAppConfigManager.convert_features(mock_config_dict, "CHAT") - def test_convert_features_mapping_subclass(self, mocker, mock_app_additional_features, 
mock_managers): + def test_convert_features_mapping_subclass( + self, mocker: MockerFixture, mock_app_additional_features, mock_managers + ): # Arrange class CustomMapping(UserDict): pass diff --git a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py index fa128aca87..dacd69a578 100644 --- a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py +++ b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.workflow_ui_based_app.variables.manager import ( WorkflowVariablesConfigManager, @@ -10,19 +11,19 @@ from core.app.app_config.workflow_ui_based_app.variables.manager import ( @pytest.fixture -def mock_workflow(mocker): +def mock_workflow(mocker: MockerFixture): workflow = mocker.MagicMock() workflow.graph_dict = {"nodes": []} return workflow @pytest.fixture -def mock_variable_entity(mocker): +def mock_variable_entity(mocker: MockerFixture): return mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.VariableEntity") @pytest.fixture -def mock_rag_entity(mocker): +def mock_rag_entity(mocker: MockerFixture): return mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.RagPipelineVariableEntity") diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py index af5d203f12..bc3b06cd1b 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py @@ -111,7 +111,7 @@ class TestAdvancedChatAppGeneratorInternals: workflow_id="workflow-id", ) - def test_generate_loads_conversation_and_files(self, 
monkeypatch): + def test_generate_loads_conversation_and_files(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() @@ -195,7 +195,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["application_generate_entity"].files == built_files assert build_files_called["called"] is True - def test_resume_delegates_to_generate(self, monkeypatch): + def test_resume_delegates_to_generate(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() application_generate_entity = AdvancedChatAppGenerateEntity.model_construct( task_id="task", @@ -235,7 +235,7 @@ class TestAdvancedChatAppGeneratorInternals: assert result == {"resumed": True} assert captured["graph_runtime_state"] is not None - def test_single_iteration_generate_builds_debug_task(self, monkeypatch): + def test_single_iteration_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -293,7 +293,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert captured["application_generate_entity"].single_iteration_run.node_id == "node-1" - def test_single_loop_generate_builds_debug_task(self, monkeypatch): + def test_single_loop_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -351,7 +351,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert captured["application_generate_entity"].single_loop_run.node_id == "node-2" - def test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch): + def test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 
0 app_config = self._build_app_config() @@ -449,7 +449,7 @@ class TestAdvancedChatAppGeneratorInternals: assert isinstance(captured["conversation"], ConversationSnapshot) assert isinstance(captured["message"], MessageSnapshot) - def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch): + def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 0 app_config = self._build_app_config() @@ -535,7 +535,7 @@ class TestAdvancedChatAppGeneratorInternals: db_session.refresh.assert_not_called() db_session.close.assert_called_once() - def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch): + def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -594,7 +594,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch): + def test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -658,7 +658,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_handles_stopped_error(self, monkeypatch): + def test_generate_worker_handles_stopped_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -732,7 +732,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_not_called() - def test_generate_worker_handles_validation_error(self, monkeypatch): + def test_generate_worker_handles_validation_error(self, monkeypatch: pytest.MonkeyPatch): generator 
= AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -816,7 +816,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch): + def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch: pytest.MonkeyPatch): app_config = self._build_app_config() @contextmanager @@ -897,7 +897,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -953,7 +953,7 @@ class TestAdvancedChatAppGeneratorInternals: stream=False, ) - def test_handle_response_re_raises_value_error(self, monkeypatch): + def test_handle_response_re_raises_value_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -1002,7 +1002,7 @@ class TestAdvancedChatAppGeneratorInternals: logger_exception.assert_called_once() - def test_generate_worker_handles_invoke_auth_error(self, monkeypatch): + def test_generate_worker_handles_invoke_auth_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -1088,7 +1088,7 @@ class TestAdvancedChatAppGeneratorInternals: assert queue_manager.publish_error.called - def test_generate_debugger_enables_retrieve_source(self, monkeypatch): + def test_generate_debugger_enables_retrieve_source(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -1167,7 +1167,7 @@ class TestAdvancedChatAppGeneratorInternals: assert app_config.additional_features.show_retrieve_source is True assert 
captured["application_generate_entity"].query == "hello" - def test_generate_service_api_sets_parent_message_id(self, monkeypatch): + def test_generate_service_api_sets_parent_message_id(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py index cd85a3b781..e5cb8a3383 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py @@ -224,7 +224,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -368,7 +368,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert list(pipeline._handle_loop_next_event(loop_next)) == ["loop_next"] assert list(pipeline._handle_loop_completed_event(loop_done)) == ["loop_done"] - def test_workflow_finish_handlers(self, monkeypatch): + def test_workflow_finish_handlers(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_run_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( @@ -593,7 +593,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert message.answer == "hello" assert message.message_metadata - def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch): + def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() 
pipeline._message_end_to_stream_response = lambda: "end" saved: list[str] = [] @@ -614,7 +614,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert responses == ["end"] assert saved == ["saved"] - def test_handle_message_end_event_applies_output_moderation(self, monkeypatch): + def test_handle_message_end_event_applies_output_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py index a871e8d93b..d47b70e950 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py @@ -2,6 +2,7 @@ import uuid from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.agent_chat.app_config_manager import ( @@ -11,7 +12,7 @@ from core.entities.agent_entities import PlanningStrategy class TestAgentChatAppConfigManagerGetAppConfig: - def test_get_app_config_override_config(self, mocker): + def test_get_app_config_override_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"ignored": True} @@ -45,7 +46,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.variables == "variables" assert result.external_data_variables == "external" - def test_get_app_config_conversation_specific(self, mocker): + def test_get_app_config_conversation_specific(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", 
tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -76,7 +77,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.app_model_config_dict == app_model_config.to_dict.return_value assert result.app_model_config_from.value == "conversation-specific-config" - def test_get_app_config_latest_config(self, mocker): + def test_get_app_config_latest_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -107,7 +108,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: class TestAgentChatAppConfigManagerConfigValidate: - def test_config_validate_filters_related_keys(self, mocker): + def test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {}, "user_input_form": {}, @@ -247,7 +248,7 @@ class TestValidateAgentModeAndSetDefaults: {"agent_mode": {"enabled": True, "tools": [{"dataset": {"enabled": True, "id": "bad"}}]}}, ) - def test_old_tool_dataset_id_not_exists(self, mocker): + def test_old_tool_dataset_id_not_exists(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", return_value=False, @@ -275,7 +276,7 @@ class TestValidateAgentModeAndSetDefaults: "tenant", {"agent_mode": {"enabled": True, "tools": [tool]}} ) - def test_valid_old_and_new_style_tools(self, mocker): + def test_valid_old_and_new_style_tools(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", return_value=True, diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py index 80f7f94b1a..6cd62c933a 100644 --- 
a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py @@ -2,6 +2,7 @@ import contextlib import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.app.apps.agent_chat.app_generator import AgentChatAppGenerator from core.app.apps.exc import GenerateTaskStoppedError @@ -16,7 +17,7 @@ class DummyAccount: @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = AgentChatAppGenerator() mocker.patch( "core.app.apps.agent_chat.app_generator.current_app", @@ -27,19 +28,19 @@ def generator(mocker): class TestAgentChatAppGeneratorGenerate: - def test_generate_rejects_blocking_mode(self, generator, mocker): + def test_generate_rejects_blocking_mode(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={}, invoke_from=mocker.MagicMock(), streaming=False) - def test_generate_requires_query(self, generator, mocker): + def test_generate_requires_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={"inputs": {}}, invoke_from=mocker.MagicMock()) - def test_generate_rejects_non_string_query(self, generator, mocker): + def test_generate_rejects_non_string_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): @@ -50,7 +51,7 @@ class TestAgentChatAppGeneratorGenerate: invoke_from=mocker.MagicMock(), ) - def test_generate_override_requires_debugger(self, generator, mocker): + def test_generate_override_requires_debugger(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") @@ -62,7 +63,7 @@ class 
TestAgentChatAppGeneratorGenerate: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_success_with_debugger_override(self, generator, mocker): + def test_generate_success_with_debugger_override(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -142,7 +143,7 @@ class TestAgentChatAppGeneratorGenerate: assert result == {"result": "ok"} thread_obj.start.assert_called_once() - def test_generate_without_file_config(self, generator, mocker): + def test_generate_without_file_config(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -213,14 +214,14 @@ class TestAgentChatAppGeneratorGenerate: class TestAgentChatAppGeneratorWorker: @pytest.fixture(autouse=True) - def patch_context(self, mocker): + def patch_context(self, mocker: MockerFixture): @contextlib.contextmanager def ctx_manager(*args, **kwargs): yield mocker.patch("core.app.apps.agent_chat.app_generator.preserve_flask_contexts", ctx_manager) - def test_generate_worker_handles_generate_task_stopped(self, generator, mocker): + def test_generate_worker_handles_generate_task_stopped(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -250,7 +251,7 @@ class TestAgentChatAppGeneratorWorker: Exception("bad"), ], ) - def test_generate_worker_publishes_errors(self, generator, mocker, error): + def test_generate_worker_publishes_errors(self, generator, mocker: MockerFixture, error): queue_manager = mocker.MagicMock() generator._get_conversation = 
mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -271,7 +272,7 @@ class TestAgentChatAppGeneratorWorker: assert queue_manager.publish_error.called - def test_generate_worker_logs_value_error_when_debug(self, generator, mocker): + def test_generate_worker_logs_value_error_when_debug(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py index 4567b35480..0260235b03 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.agent.entities import AgentEntity from core.app.apps.agent_chat.app_runner import AgentChatAppRunner @@ -13,7 +14,7 @@ def runner(): class TestAgentChatAppRunnerRun: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, runner, mocker: MockerFixture): app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", agent=mocker.MagicMock()) generate_entity = mocker.MagicMock(app_config=app_config, inputs={}, query="q", files=[], stream=True) @@ -22,7 +23,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_moderation_error_direct_output(self, runner, mocker): + def test_run_moderation_error_direct_output(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", 
prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -45,7 +46,7 @@ class TestAgentChatAppRunnerRun: runner.direct_output.assert_called_once() - def test_run_annotation_reply_short_circuits(self, runner, mocker): + def test_run_annotation_reply_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -74,7 +75,7 @@ class TestAgentChatAppRunnerRun: queue_manager.publish.assert_called_once() runner.direct_output.assert_called_once() - def test_run_hosting_moderation_short_circuits(self, runner, mocker): + def test_run_hosting_moderation_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -98,7 +99,7 @@ class TestAgentChatAppRunnerRun: runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_model_schema_missing(self, runner, mocker): + def test_run_model_schema_missing(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -140,7 +141,7 @@ class TestAgentChatAppRunnerRun: (LLMMode.COMPLETION, "CotCompletionAgentRunner"), ], ) - def test_run_chain_of_thought_modes(self, runner, mocker, mode, expected_runner): + def test_run_chain_of_thought_modes(self, runner, mocker: MockerFixture, mode, expected_runner): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", 
prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -196,7 +197,7 @@ class TestAgentChatAppRunnerRun: runner_instance.run.assert_called_once() runner._handle_invoke_result.assert_called_once() - def test_run_invalid_llm_mode_raises(self, runner, mocker): + def test_run_invalid_llm_mode_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -242,7 +243,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), conversation, message) - def test_run_function_calling_strategy_selected_by_features(self, runner, mocker): + def test_run_function_calling_strategy_selected_by_features(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -298,7 +299,7 @@ class TestAgentChatAppRunnerRun: assert app_config.agent.strategy == AgentEntity.Strategy.FUNCTION_CALLING runner_instance.run.assert_called_once() - def test_run_conversation_not_found(self, runner, mocker): + def test_run_conversation_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -332,7 +333,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), 
mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) - def test_run_message_not_found(self, runner, mocker): + def test_run_message_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -366,7 +367,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) - def test_run_invalid_agent_strategy_raises(self, runner, mocker): + def test_run_invalid_agent_strategy_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock(strategy="invalid", provider="p", model="m") diff --git a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py index aa2085177e..8dcf6e9193 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.completion.app_runner as module from core.app.apps.completion.app_runner import CompletionAppRunner @@ -47,7 +48,7 @@ def _build_generate_entity(app_config, file_upload_config=None): class TestCompletionAppRunner: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, runner, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -58,7 +59,7 @@ class 
TestCompletionAppRunner: with pytest.raises(ValueError): runner.run(app_generate_entity, MagicMock(), MagicMock()) - def test_run_moderation_error_outputs_direct(self, runner, mocker): + def test_run_moderation_error_outputs_direct(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -78,7 +79,7 @@ class TestCompletionAppRunner: runner.direct_output.assert_called_once() runner._handle_invoke_result.assert_not_called() - def test_run_hosting_moderation_stops(self, runner, mocker): + def test_run_hosting_moderation_stops(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -97,7 +98,7 @@ class TestCompletionAppRunner: runner._handle_invoke_result.assert_not_called() - def test_run_dataset_and_external_tools_flow(self, runner, mocker): + def test_run_dataset_and_external_tools_flow(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -140,7 +141,7 @@ class TestCompletionAppRunner: assert dataset_retrieval.retrieve.call_args.kwargs["query"] == "query_from_input" runner._handle_invoke_result.assert_called_once() - def test_run_uses_low_image_detail_default(self, runner, mocker): + def test_run_uses_low_image_detail_default(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py index 024bd8f302..353162be8c 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py @@ -1,6 +1,8 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + 
import core.app.apps.completion.app_config_manager as module from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.completion.app_config_manager import CompletionAppConfigManager @@ -8,7 +10,7 @@ from models.model import AppMode class TestCompletionAppConfigManager: - def test_get_app_config_with_override(self, mocker): + def test_get_app_config_with_override(self, mocker: MockerFixture): app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -35,8 +37,8 @@ class TestCompletionAppConfigManager: assert result.external_data_variables == ["ext1"] assert result.app_mode == AppMode.COMPLETION - def test_get_app_config_without_override_uses_model_config(self, mocker): - app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) + def test_get_app_config_without_override_uses_model_config(self, mocker: MockerFixture): + app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -53,7 +55,7 @@ class TestCompletionAppConfigManager: assert result.app_model_config_from == EasyUIBasedAppModelConfigFrom.APP_LATEST_CONFIG assert result.app_model_config_dict == {"model": {"provider": "x"}} - def test_config_validate_filters_related_keys(self, mocker): + def test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {"provider": "x"}, "variables": ["v"], diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py index f2e35f9900..de20dde677 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py +++ 
b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture import core.app.apps.completion.app_generator as module from core.app.apps.completion.app_generator import CompletionAppGenerator @@ -15,7 +16,7 @@ from services.errors.message import MessageNotExistsError @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = CompletionAppGenerator() mocker.patch.object(module, "copy_current_request_context", side_effect=lambda fn: fn) @@ -69,7 +70,7 @@ class TestCompletionAppGenerator: streaming=False, ) - def test_generate_success_no_file_config(self, generator, mocker): + def test_generate_success_no_file_config(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) mocker.patch.object(module.FileUploadConfigManager, "convert", return_value=None) @@ -99,7 +100,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_not_called() - def test_generate_success_with_files(self, generator, mocker): + def test_generate_success_with_files(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -131,7 +132,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_called_once() - def test_generate_override_model_config_debugger(self, generator, mocker): + def test_generate_override_model_config_debugger(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -165,7 +166,7 @@ class TestCompletionAppGenerator: assert 
get_app_config.call_args.kwargs["override_config_dict"] == override_config - def test_generate_more_like_this_message_not_found(self, generator, mocker): + def test_generate_more_like_this_message_not_found(self, generator, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -178,7 +179,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_disabled(self, generator, mocker): + def test_generate_more_like_this_disabled(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=False, more_like_this_dict={"enabled": False}) @@ -195,7 +196,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_app_model_config_missing(self, generator, mocker): + def test_generate_more_like_this_app_model_config_missing(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = None @@ -212,7 +213,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_message_config_none(self, generator, mocker): + def test_generate_more_like_this_message_config_none(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -229,7 +230,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_success(self, generator, mocker): + def test_generate_more_like_this_success(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -297,7 +298,7 @@ class TestCompletionAppGenerator: (RuntimeError("boom"), True), ], ) - def test_generate_worker_error_handling(self, generator, mocker, 
error, should_publish): + def test_generate_worker_error_handling(self, generator, mocker: MockerFixture, error, should_publish): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py index 5d4c9bcde0..6c1ee20ffb 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py @@ -1,12 +1,14 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + import core.app.apps.pipeline.pipeline_config_manager as module from core.app.apps.pipeline.pipeline_config_manager import PipelineConfigManager from models.model import AppMode -def test_get_pipeline_config(mocker): +def test_get_pipeline_config(mocker: MockerFixture): pipeline = MagicMock(tenant_id="tenant", id="pipe1") workflow = MagicMock(id="wf1") @@ -26,7 +28,7 @@ def test_get_pipeline_config(mocker): assert result.rag_pipeline_variables == ["var1"] -def test_config_validate_filters_related_keys(mocker): +def test_config_validate_filters_related_keys(mocker: MockerFixture): config = { "file_upload": {"enabled": True}, "tts": {"enabled": True}, diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py index c36edf48fc..dd91243a37 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py @@ -3,6 +3,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, PropertyMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_generator as module from core.app.apps.exc import GenerateTaskStoppedError @@ -23,7 +24,7 
@@ class FakeRagPipelineGenerateEntity(SimpleNamespace): @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = module.PipelineGenerator() mocker.patch.object(module, "RagPipelineGenerateEntity", FakeRagPipelineGenerateEntity) @@ -88,7 +89,7 @@ class DummySession: return False -def test_generate_dataset_missing(generator, mocker): +def test_generate_dataset_missing(generator, mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -106,7 +107,7 @@ def test_generate_dataset_missing(generator, mocker): ) -def test_generate_debugger_calls_generate(generator, mocker): +def test_generate_debugger_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -150,7 +151,7 @@ def test_generate_debugger_calls_generate(generator, mocker): assert result == {"result": "ok"} -def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker): +def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -228,7 +229,7 @@ def test_generate_published_pipeline_creates_documents_and_delay(generator, mock task_proxy.delay.assert_called_once() -def test_generate_is_retry_calls_generate(generator, mocker): +def test_generate_is_retry_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -273,7 +274,7 @@ def test_generate_is_retry_calls_generate(generator, mocker): assert result == {"result": "ok"} -def test_generate_worker_handles_errors(generator, mocker): +def test_generate_worker_handles_errors(generator, mocker: MockerFixture): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -308,7 +309,7 @@ def test_generate_worker_handles_errors(generator, mocker): 
queue_manager.publish_error.assert_called_once() -def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker): +def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker: MockerFixture): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -341,7 +342,7 @@ def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker assert module.PipelineRunner.call_args.kwargs["system_user_id"] == "session" -def test_generate_raises_when_workflow_not_found(generator, mocker): +def test_generate_raises_when_workflow_not_found(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -369,7 +370,7 @@ def test_generate_raises_when_workflow_not_found(generator, mocker): ) -def test_generate_success_returns_converted(generator, mocker): +def test_generate_success_returns_converted(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -409,7 +410,7 @@ def test_generate_success_returns_converted(generator, mocker): assert result == "converted" -def test_single_iteration_generate_validates_inputs(generator, mocker): +def test_single_iteration_generate_validates_inputs(generator, mocker: MockerFixture): with pytest.raises(ValueError): generator.single_iteration_generate(_build_pipeline(), _build_workflow(), "", _build_user(), {}) @@ -419,7 +420,7 @@ def test_single_iteration_generate_validates_inputs(generator, mocker): ) -def test_single_iteration_generate_dataset_required(generator, mocker): +def test_single_iteration_generate_dataset_required(generator, mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -436,7 +437,7 @@ def test_single_iteration_generate_dataset_required(generator, mocker): ) -def 
test_single_iteration_generate_success(generator, mocker): +def test_single_iteration_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -476,7 +477,7 @@ def test_single_iteration_generate_success(generator, mocker): assert result == {"ok": True} -def test_single_loop_generate_success(generator, mocker): +def test_single_loop_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -516,7 +517,7 @@ def test_single_loop_generate_success(generator, mocker): assert result == {"ok": True} -def test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker): +def test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() app_entity = FakeRagPipelineGenerateEntity(task_id="t") @@ -536,7 +537,7 @@ def test_handle_response_value_error_triggers_generate_task_stopped(generator, m ) -def test_build_document_sets_metadata_for_builtin_fields(generator, mocker): +def test_build_document_sets_metadata_for_builtin_fields(generator, mocker: MockerFixture): class DummyDocument(SimpleNamespace): pass @@ -620,7 +621,7 @@ def test_format_datasource_info_list_missing_node_data(generator): ) -def test_format_datasource_info_list_online_drive_folder(generator, mocker): +def test_format_datasource_info_list_online_drive_folder(generator, mocker: MockerFixture): workflow = MagicMock( graph_dict={ "nodes": [ diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py index 9db83f5531..abfc76afa0 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture import 
core.app.apps.pipeline.pipeline_queue_manager as module from core.app.apps.base_app_queue_manager import PublishFrom @@ -16,7 +17,7 @@ from core.app.entities.queue_entities import ( from graphon.model_runtime.entities.llm_entities import LLMResult -def test_publish_sets_stop_listen_and_raises_on_stopped(mocker): +def test_publish_sets_stop_listen_and_raises_on_stopped(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -28,7 +29,7 @@ def test_publish_sets_stop_listen_and_raises_on_stopped(mocker): manager.stop_listen.assert_called_once() -def test_publish_stop_events_trigger_stop_listen(mocker): +def test_publish_stop_events_trigger_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -46,7 +47,7 @@ def test_publish_stop_events_trigger_stop_listen(mocker): manager.stop_listen.assert_called_once() -def test_publish_non_stop_event_no_stop_listen(mocker): +def test_publish_non_stop_event_no_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py index 603062a51c..1eed76cf84 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py @@ -22,6 +22,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_runner as module from 
core.app.apps.pipeline.pipeline_runner import PipelineRunner @@ -126,7 +127,7 @@ def test_update_document_status_on_failure(mocker, runner): session.commit.assert_called_once() -def test_run_pipeline_not_found(mocker): +def test_run_pipeline_not_found(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.invoke_from = InvokeFrom.WEB_APP app_generate_entity.single_iteration_run = None @@ -150,7 +151,7 @@ def test_run_pipeline_not_found(mocker): runner.run() -def test_run_workflow_not_initialized(mocker): +def test_run_workflow_not_initialized(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") @@ -174,7 +175,7 @@ def test_run_workflow_not_initialized(mocker): runner.run() -def test_run_single_iteration_path(mocker): +def test_run_single_iteration_path(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.single_iteration_run = MagicMock() @@ -223,7 +224,7 @@ def test_run_single_iteration_path(mocker): runner._handle_event.assert_called() -def test_run_normal_path_builds_graph(mocker): +def test_run_normal_path_builds_graph(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") diff --git a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py index f48a7fb38e..835c9a8576 100644 --- a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py @@ -45,7 +45,7 @@ def _make_generate_entity(app_config: WorkflowUIBasedAppConfig) -> AdvancedChatA @pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -108,7 +108,7 @@ def test_init_generate_records_marks_existing_conversation(): assert 
entity.is_new_conversation is False -def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch): +def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch: pytest.MonkeyPatch): app_config = _make_app_config() entity = _make_generate_entity(app_config) entity.conversation_id = "existing-conversation-id" diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py index b0f8b423e1..f2a1700664 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py @@ -369,7 +369,7 @@ def test_validate_inputs_optional_file_with_empty_string_ignores_default(): class TestBaseAppGeneratorExtras: - def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch): + def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch: pytest.MonkeyPatch): base_app_generator = BaseAppGenerator() variables = [ diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py index 17de39ca99..c6eedf7be7 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py @@ -42,7 +42,7 @@ class _QueueRecorder: class TestAppRunner: - def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch): + def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -65,7 +65,7 @@ class TestAppRunner: assert model_config.parameters["max_tokens"] == 20 - def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, monkeypatch): + def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -86,7 +86,7 @@ class TestAppRunner: assert runner.recalc_llm_max_tokens(model_config, 
prompt_messages=[]) == -1 - def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch): + def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() app_generate_entity = SimpleNamespace(model_conf=SimpleNamespace(model="mock"), stream=True) @@ -133,7 +133,7 @@ class TestAppRunner: stream=True, ) - def test_organize_prompt_messages_simple_template(self, monkeypatch): + def test_organize_prompt_messages_simple_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="chat", stop=["STOP"]) prompt_template_entity = PromptTemplateEntity( @@ -158,7 +158,7 @@ class TestAppRunner: assert prompt_messages == ["simple-message"] assert stop == ["simple-stop"] - def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="completion", stop=[""]) captured: dict[str, object] = {} @@ -191,7 +191,7 @@ class TestAppRunner: assert memory_config.role_prefix.user == "U" assert memory_config.role_prefix.assistant == "A" - def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="chat", stop=[""]) captured: dict[str, object] = {} @@ -245,7 +245,7 @@ class TestAppRunner: files=[], ) - def test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch): + def test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() warning_logger = MagicMock() @@ -284,7 +284,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.message.content == "abc" 
warning_logger.assert_called_once() - def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch): + def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() exception_logger = MagicMock() @@ -331,7 +331,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.usage == usage exception_logger.assert_called_once() - def test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch): + def test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() class _ToggleBool: @@ -367,7 +367,7 @@ class TestAppRunner: db_session.add.assert_not_called() queue_manager.publish.assert_not_called() - def test_check_hosting_moderation_direct_output_called(self, monkeypatch): + def test_check_hosting_moderation_direct_output_called(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() app_generate_entity = SimpleNamespace(stream=False) @@ -388,7 +388,7 @@ class TestAppRunner: assert result is True assert direct_output.called - def test_fill_in_inputs_from_external_data_tools(self, monkeypatch): + def test_fill_in_inputs_from_external_data_tools(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.ExternalDataFetch.fetch", @@ -405,7 +405,7 @@ class TestAppRunner: assert result == {"foo": "bar"} - def test_moderation_for_inputs_returns_result(self, monkeypatch): + def test_moderation_for_inputs_returns_result(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.InputModeration.check", @@ -424,7 +424,7 @@ class TestAppRunner: assert result == (True, {}, "") - def test_query_app_annotations_to_reply(self, monkeypatch): + def test_query_app_annotations_to_reply(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() 
monkeypatch.setattr( "core.app.apps.base_app_runner.AnnotationReplyFeature.query", diff --git a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py index 1250ac5ecf..6a9b5e7619 100644 --- a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py @@ -85,7 +85,7 @@ def _make_chat_generate_entity(app_config: EasyUIBasedAppConfig) -> ChatAppGener @pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -130,7 +130,7 @@ def test_init_generate_records_sets_conversation_fields_for_chat_entity(): class TestMessageBasedAppGeneratorExtras: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = MessageBasedAppGenerator() class _Pipeline: @@ -155,7 +155,7 @@ class TestMessageBasedAppGeneratorExtras: stream=False, ) - def test_get_app_model_config_requires_valid_config(self, monkeypatch): + def test_get_app_model_config_requires_valid_config(self, monkeypatch: pytest.MonkeyPatch): generator = MessageBasedAppGenerator() app_model = SimpleNamespace(id="app", app_model_config_id=None, app_model_config=None) diff --git a/api/tests/unit_tests/core/app/apps/test_pause_resume.py b/api/tests/unit_tests/core/app/apps/test_pause_resume.py index 6104b8d6ca..aa71f4d9c4 100644 --- a/api/tests/unit_tests/core/app/apps/test_pause_resume.py +++ b/api/tests/unit_tests/core/app/apps/test_pause_resume.py @@ -3,6 +3,8 @@ import time from types import ModuleType, SimpleNamespace from typing import Any +from pytest_mock import MockerFixture + import graphon.nodes.human_input.entities # noqa: F401 from core.app.apps.advanced_chat import app_generator as adv_app_gen_module from 
core.app.apps.workflow import app_generator as wf_app_gen_module @@ -101,7 +103,7 @@ class _StubToolNode(Node[_StubToolNodeData]): yield self._convert_node_run_result_to_graph_node_event(result) -def _patch_tool_node(mocker): +def _patch_tool_node(mocker: MockerFixture): original_resolve_node_class = node_factory_module.resolve_workflow_node_class def _patched_resolve_node_class(*, node_type: NodeType, node_version: str) -> type[Node]: @@ -196,7 +198,7 @@ def _node_successes(events: list[GraphEngineEvent]) -> list[str]: return [evt.node_id for evt in events if isinstance(evt, NodeRunSucceededEvent)] -def test_workflow_app_pause_resume_matches_baseline(mocker): +def test_workflow_app_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("baseline") @@ -236,7 +238,7 @@ def test_workflow_app_pause_resume_matches_baseline(mocker): assert resumed_state.outputs == baseline_outputs -def test_advanced_chat_pause_resume_matches_baseline(mocker): +def test_advanced_chat_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("adv-baseline") diff --git a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py index 58f0e47a4b..12f3ed9f07 100644 --- a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py +++ b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py @@ -54,7 +54,7 @@ class FakeTopic: return self._state["subscribed"] -def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch): +def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch: pytest.MonkeyPatch): topic = FakeTopic() def fake_get_response_topic(cls, app_mode, workflow_run_id): @@ -92,7 +92,7 @@ def test_normalize_terminal_events_empty_values(): assert _normalize_terminal_events([]) == set({}) -def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch): +def 
test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch: pytest.MonkeyPatch): topic = FakeTopic() times = [1000.0, 1000.0, 1001.0, 1001.0, 1002.0] diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py index 7e8367c6c4..0e9f8b6f35 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py @@ -1,6 +1,8 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + from core.app.apps.workflow.app_generator import SKIP_PREPARE_USER_INPUTS_KEY, WorkflowAppGenerator @@ -22,7 +24,7 @@ def test_should_prepare_user_inputs_keeps_validation_when_flag_false(): assert WorkflowAppGenerator()._should_prepare_user_inputs(args) -def test_resume_delegates_to_generate(mocker): +def test_resume_delegates_to_generate(mocker: MockerFixture): generator = WorkflowAppGenerator() mock_generate = mocker.patch.object(generator, "_generate", return_value="ok") @@ -52,7 +54,7 @@ def test_resume_delegates_to_generate(mocker): assert kwargs["invoke_from"] == "debugger" -def test_generate_appends_pause_layer_and_forwards_state(mocker): +def test_generate_appends_pause_layer_and_forwards_state(mocker: MockerFixture): generator = WorkflowAppGenerator() mock_queue_manager = MagicMock() @@ -124,7 +126,7 @@ def test_generate_appends_pause_layer_and_forwards_state(mocker): assert worker_kwargs["kwargs"]["graph_runtime_state"] is graph_runtime_state -def test_resume_path_runs_worker_with_runtime_state(mocker): +def test_resume_path_runs_worker_with_runtime_state(mocker: MockerFixture): generator = WorkflowAppGenerator() runtime_state = MagicMock(name="runtime-state") diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py index adabf5d495..dbe846cbc5 100644 --- 
a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py @@ -93,7 +93,7 @@ class TestWorkflowBasedAppRunner: with pytest.raises(ValueError, match="Neither single_iteration_run nor single_loop_run"): runner._prepare_single_node_execution(workflow, None, None, user_id="00000000-0000-0000-0000-000000000001") - def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch): + def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch: pytest.MonkeyPatch): runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app") graph_runtime_state = GraphRuntimeState( variable_pool=VariablePool(system_variables=default_system_variables()), @@ -145,7 +145,9 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool is graph_runtime_state.variable_pool - def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init(self, monkeypatch): + def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init( + self, monkeypatch: pytest.MonkeyPatch + ): variable_loader = SimpleNamespace( load_variables=lambda selectors: ( [ @@ -235,7 +237,7 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool.get(["sys", "conversation_id"]).value == "conv-1" - def test_handle_graph_run_events_and_pause_notifications(self, monkeypatch): + def test_handle_graph_run_events_and_pause_notifications(self, monkeypatch: pytest.MonkeyPatch): published: list[object] = [] class _QueueManager: diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py index 09ad078a70..320189143e 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py @@ -67,7 +67,7 @@ class 
TestWorkflowAppGeneratorValidation: class TestWorkflowAppGeneratorHandleResponse: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -116,7 +116,7 @@ class TestWorkflowAppGeneratorHandleResponse: class TestWorkflowAppGeneratorGenerate: - def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch): + def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py index 0bcc1029b0..1311d5e9cb 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py @@ -187,7 +187,7 @@ class TestWorkflowGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -408,7 +408,7 @@ class TestWorkflowGenerateTaskPipeline: assert list(pipeline._handle_human_input_form_timeout_event(timeout_event)) == ["timeout"] assert list(pipeline._handle_agent_log_event(agent_event)) == ["log"] - def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch): + def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": 
{"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -560,7 +560,7 @@ class TestWorkflowGenerateTaskPipeline: responses = list(pipeline._wrapper_process_stream_response()) assert responses == [PingStreamResponse(task_id="task")] - def test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch): + def test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -597,7 +597,7 @@ class TestWorkflowGenerateTaskPipeline: assert sleep_spy assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch): + def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -633,7 +633,7 @@ class TestWorkflowGenerateTaskPipeline: assert logger_exception assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_database_session_rolls_back_on_error(self, monkeypatch): + def test_database_session_rolls_back_on_error(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() calls = {"enter": 0, "exit_exc": None} diff --git a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py index a20d89d807..f10e0084d0 100644 --- a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py @@ -143,7 +143,7 @@ class 
TestEasyUiBasedGenerateTaskPipeline: assert pipeline._listen_audio_msg(publisher=None, task_id="task") is None - def test_process_stream_response_handles_chunks_and_end(self, monkeypatch): + def test_process_stream_response_handles_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -245,7 +245,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(event, QueueLLMChunkEvent) for event in events) assert any(isinstance(event, QueueStopEvent) for event in events) - def test_handle_stop_updates_usage(self, monkeypatch): + def test_handle_stop_updates_usage(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -313,7 +313,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert pipeline._task_state.llm_result.usage.prompt_tokens == 10 assert pipeline._task_state.llm_result.usage.completion_tokens == 5 - def test_record_files_builds_file_payloads(self, monkeypatch): + def test_record_files_builds_file_payloads(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -405,7 +405,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert files assert len(files) == 3 - def test_process_stream_response_handles_annotation_and_error(self, monkeypatch): + def test_process_stream_response_handles_annotation_and_error(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -472,7 +472,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert isinstance(responses[-1], ValueError) assert pipeline._task_state.llm_result.message.content == "annotatedagent" - def 
test_agent_thought_to_stream_response_returns_payload(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_payload(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -681,7 +681,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses == ["payload"] - def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch): + def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -715,7 +715,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses[1] == "payload" assert isinstance(responses[-1], MessageAudioEndStreamResponse) - def test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch): + def test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -756,7 +756,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(item, MessageAudioStreamResponse) for item in responses) assert isinstance(responses[-1], MessageAudioEndStreamResponse) - def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch): + def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -896,7 +896,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert 
list(pipeline._process_stream_response(publisher=None)) == [] - def test_save_message_persists_fields_and_emits_trace(self, monkeypatch): + def test_save_message_persists_fields_and_emits_trace(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -981,7 +981,7 @@ class TestEasyUiBasedGenerateTaskPipeline: with pytest.raises(ValueError, match="Conversation conv not found"): pipeline._save_message(session=session) - def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch): + def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1021,7 +1021,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == "msg" assert response.metadata["usage"]["prompt_tokens"] == 1 - def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch): + def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1059,7 +1059,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.files is None - def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch): + def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1155,7 +1155,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == 
"msg" assert response.answer == "hello" - def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( diff --git a/api/tests/unit_tests/core/app/workflow/test_node_factory.py b/api/tests/unit_tests/core/app/workflow/test_node_factory.py index 30a068f4c5..7c9f174129 100644 --- a/api/tests/unit_tests/core/app/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/app/workflow/test_node_factory.py @@ -46,7 +46,7 @@ class TestDifyNodeFactory: lambda **_kwargs: node_class, ) - def _factory(self, monkeypatch): + def _factory(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_STRING_LENGTH", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_NUMBER", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MIN_NUMBER", -10) @@ -72,20 +72,20 @@ class TestDifyNodeFactory: graph_runtime_state=SimpleNamespace(), ) - def test_create_node_unknown_type(self, monkeypatch): + def test_create_node_unknown_type(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": "unknown"}}) - def test_create_node_missing_mapping(self, monkeypatch): + def test_create_node_missing_mapping(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) monkeypatch.setattr("core.workflow.node_factory.get_node_type_classes_mapping", lambda: {}) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_missing_latest_class(self, monkeypatch): + def test_create_node_missing_latest_class(self, 
monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) monkeypatch.setattr( "core.workflow.node_factory.get_node_type_classes_mapping", @@ -96,7 +96,7 @@ class TestDifyNodeFactory: with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_selects_versioned_class(self, monkeypatch): + def test_create_node_selects_versioned_class(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) selected_versions: list[tuple[str, str]] = [] @@ -115,7 +115,7 @@ class TestDifyNodeFactory: assert node.id == "node-1" assert selected_versions == [("snapshot", "called")] - def test_create_node_code_branch(self, monkeypatch): + def test_create_node_code_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyCodeNode) @@ -124,7 +124,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyCodeNode) assert node.id == "node-1" - def test_create_node_template_transform_branch(self, monkeypatch): + def test_create_node_template_transform_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyTemplateTransformNode) @@ -133,7 +133,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyTemplateTransformNode) assert "jinja2_template_renderer" in node.kwargs - def test_create_node_http_request_branch(self, monkeypatch): + def test_create_node_http_request_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyHttpRequestNode) @@ -142,7 +142,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyHttpRequestNode) assert "http_request_config" in node.kwargs - def test_create_node_knowledge_retrieval_branch(self, monkeypatch): + def test_create_node_knowledge_retrieval_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) 
self._stub_node_resolution(monkeypatch, DummyKnowledgeRetrievalNode) @@ -151,7 +151,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyKnowledgeRetrievalNode) assert node.kwargs == {} - def test_create_node_document_extractor_branch(self, monkeypatch): + def test_create_node_document_extractor_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyDocumentExtractorNode) diff --git a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py index 82552470a9..04ce524904 100644 --- a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py +++ b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py @@ -2,12 +2,14 @@ from __future__ import annotations from types import SimpleNamespace +import pytest + from core.app.workflow.layers.observability import ObservabilityLayer from graphon.enums import BuiltinNodeTypes class TestObservabilityLayerExtras: - def test_init_tracer_enabled_sets_tracer(self, monkeypatch): + def test_init_tracer_enabled_sets_tracer(self, monkeypatch: pytest.MonkeyPatch): tracer = object() monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -18,7 +20,7 @@ class TestObservabilityLayerExtras: assert layer._is_disabled is False assert layer._tracer is tracer - def test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch, caplog): + def test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch: pytest.MonkeyPatch, caplog): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -33,7 +35,7 @@ class TestObservabilityLayerExtras: assert layer._tracer 
is None assert "Failed to get OpenTelemetry tracer" in caplog.text - def test_init_tracer_disables_when_otel_disabled(self, monkeypatch): + def test_init_tracer_disables_when_otel_disabled(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", False) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -143,7 +145,7 @@ class TestObservabilityLayerExtras: assert layer._node_contexts == {} - def test_on_node_run_end_calls_span_end(self, monkeypatch): + def test_on_node_run_end_calls_span_end(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False ended: list[str] = [] @@ -164,7 +166,7 @@ class TestObservabilityLayerExtras: assert ended == ["ended"] assert "exec" not in layer._node_contexts - def test_on_node_run_end_logs_detach_failure(self, monkeypatch, caplog): + def test_on_node_run_end_logs_detach_failure(self, monkeypatch: pytest.MonkeyPatch, caplog): layer = ObservabilityLayer() layer._is_disabled = False @@ -186,7 +188,7 @@ class TestObservabilityLayerExtras: assert "Failed to detach OpenTelemetry token" in caplog.text assert "exec" not in layer._node_contexts - def test_on_node_run_start_and_end_creates_span(self, monkeypatch): + def test_on_node_run_start_and_end_creates_span(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False diff --git a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py index cacb4dd4fa..23fe682017 100644 --- a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py +++ b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py @@ -120,7 +120,7 @@ class TestWorkflowPersistenceLayer: with pytest.raises(ValueError, match="workflow_execution_id must be provided"): layer._get_execution_id() - def 
test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch): + def test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch: pytest.MonkeyPatch): layer, _, _, _ = _make_layer() monkeypatch.setattr( diff --git a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py index 7b433ab57b..1125ce6dbc 100644 --- a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py +++ b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py @@ -3,6 +3,7 @@ import queue from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.base.tts.app_generator_tts_publisher import ( AppGeneratorTTSPublisher, @@ -17,7 +18,7 @@ from core.base.tts.app_generator_tts_publisher import ( @pytest.fixture -def mock_model_instance(mocker): +def mock_model_instance(mocker: MockerFixture): model = mocker.MagicMock() model.invoke_tts.return_value = [b"audio1", b"audio2"] model.get_tts_voices.return_value = [{"value": "voice1"}, {"value": "voice2"}] @@ -33,7 +34,7 @@ def mock_model_manager(mocker, mock_model_instance): @pytest.fixture(autouse=True) -def patch_threads(mocker): +def patch_threads(mocker: MockerFixture): """Prevent real threads from starting during tests""" mocker.patch("threading.Thread.start", return_value=None) @@ -114,7 +115,7 @@ class TestProcessFuture: finish = audio_queue.get() assert finish.status == "finish" - def test_process_future_exception(self, mocker): + def test_process_future_exception(self, mocker: MockerFixture): future_queue = queue.Queue() audio_queue = queue.Queue() @@ -222,7 +223,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker): + def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", 
"voice1") publisher.executor = MagicMock() @@ -297,7 +298,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -332,7 +333,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "Hello " - def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -358,7 +359,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "" - def test_runtime_resets_msg_text_when_text_tmp_not_str(self, mock_model_manager, mocker): + def test_runtime_resets_msg_text_when_text_tmp_not_str(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() diff --git a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py index 4c1aa33540..f9b3b1864e 100644 --- a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py @@ -1,8 +1,10 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.callback_handler.agent_tool_callback_handler as module +from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler # ----------------------------- # Fixtures @@ -10,17 +12,17 @@ import core.callback_handler.agent_tool_callback_handler as module @pytest.fixture -def 
enable_debug(mocker): +def enable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", True) @pytest.fixture -def disable_debug(mocker): +def disable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", False) @pytest.fixture -def mock_print(mocker): +def mock_print(mocker: MockerFixture): return mocker.patch("builtins.print") @@ -71,7 +73,7 @@ class TestPrintText: module.print_text("hello") mock_print.assert_called_once_with("hello", end="", file=None) - def test_print_text_with_color(self, mocker, mock_print): + def test_print_text_with_color(self, mocker: MockerFixture, mock_print): mock_get_color = mocker.patch( "core.callback_handler.agent_tool_callback_handler.get_colored_text", return_value="colored_text", @@ -82,7 +84,7 @@ class TestPrintText: mock_get_color.assert_called_once_with("hello", "green") mock_print.assert_called_once_with("colored_text", end="", file=None) - def test_print_text_with_file_flush(self, mocker): + def test_print_text_with_file_flush(self, mocker: MockerFixture): mock_file = MagicMock() mock_print = mocker.patch("builtins.print") @@ -107,21 +109,25 @@ class TestDifyAgentCallbackHandler: assert handler.color == "green" assert handler.current_loop == 1 - def test_on_tool_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_start_debug_enabled(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_called() - def test_on_tool_start_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_start_debug_disabled( + self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_not_called() - def 
test_on_tool_end_debug_enabled_and_trace(self, handler, enable_debug, mocker): + def test_on_tool_end_debug_enabled_and_trace( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") mock_trace_manager = MagicMock() @@ -137,7 +143,9 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 mock_trace_manager.add_trace_task.assert_called_once() - def test_on_tool_end_without_trace_manager(self, handler, enable_debug, mocker): + def test_on_tool_end_without_trace_manager( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_end( @@ -148,14 +156,16 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 - def test_on_tool_error_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_error_debug_enabled(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) mock_print_text.assert_called_once() - def test_on_tool_error_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_error_debug_disabled( + self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) @@ -163,14 +173,16 @@ class TestDifyAgentCallbackHandler: mock_print_text.assert_not_called() @pytest.mark.parametrize("thought", ["thinking", ""]) - def test_on_agent_start(self, handler, enable_debug, mocker, thought): + def test_on_agent_start(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture, thought): mock_print_text = 
mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_agent_start(thought) mock_print_text.assert_called() - def test_on_agent_finish_increments_loop(self, handler, enable_debug, mocker): + def test_on_agent_finish_increments_loop( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") current_loop = handler.current_loop @@ -179,19 +191,21 @@ class TestDifyAgentCallbackHandler: assert handler.current_loop == current_loop + 1 mock_print_text.assert_called() - def test_on_datasource_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_datasource_start_debug_enabled( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_datasource_start("ds1", {"x": 1}) mock_print_text.assert_called_once() - def test_ignore_agent_property(self, disable_debug, handler): + def test_ignore_agent_property(self, disable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_agent is True - def test_ignore_chat_model_property(self, disable_debug, handler): + def test_ignore_chat_model_property(self, disable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_chat_model is True - def test_ignore_properties_when_debug_enabled(self, enable_debug, handler): + def test_ignore_properties_when_debug_enabled(self, enable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_agent is False assert handler.ignore_chat_model is False diff --git a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py index 8e5670e9be..f23669c3c7 100644 --- a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py +++ 
b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom from core.callback_handler.index_tool_callback_handler import ( @@ -7,12 +8,12 @@ from core.callback_handler.index_tool_callback_handler import ( @pytest.fixture -def mock_queue_manager(mocker): +def mock_queue_manager(mocker: MockerFixture): return mocker.Mock() @pytest.fixture -def handler(mock_queue_manager, mocker): +def handler(mock_queue_manager, mocker: MockerFixture): mocker.patch( "core.callback_handler.index_tool_callback_handler.db", ) @@ -34,7 +35,7 @@ class TestOnQuery: (InvokeFrom.WEB_APP, "end_user"), ], ) - def test_on_query_success_roles(self, mocker, mock_queue_manager, invoke_from, expected_role): + def test_on_query_success_roles(self, mocker: MockerFixture, mock_queue_manager, invoke_from, expected_role): # Arrange mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") @@ -57,7 +58,7 @@ class TestOnQuery: assert dataset_query.created_by_role == expected_role mock_db.session.commit.assert_called_once() - def test_on_query_none_values(self, mocker, mock_queue_manager): + def test_on_query_none_values(self, mocker: MockerFixture, mock_queue_manager): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") handler = DatasetIndexToolCallbackHandler( @@ -75,7 +76,7 @@ class TestOnQuery: class TestOnToolEnd: - def test_on_tool_end_no_metadata(self, handler, mocker): + def test_on_tool_end_no_metadata(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") document = mocker.Mock() @@ -85,7 +86,9 @@ class TestOnToolEnd: mock_db.session.commit.assert_not_called() - def test_on_tool_end_dataset_document_not_found(self, handler, mocker): + def test_on_tool_end_dataset_document_not_found( + self, handler: 
DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_db.session.scalar.return_value = None @@ -96,7 +99,9 @@ class TestOnToolEnd: mock_db.session.scalar.assert_called_once() - def test_on_tool_end_parent_child_index_with_child(self, handler, mocker): + def test_on_tool_end_parent_child_index_with_child( + self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -119,7 +124,7 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() mock_db.session.commit.assert_called_once() - def test_on_tool_end_non_parent_child_index(self, handler, mocker): + def test_on_tool_end_non_parent_child_index(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -139,12 +144,12 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() mock_db.session.commit.assert_called_once() - def test_on_tool_end_empty_documents(self, handler): + def test_on_tool_end_empty_documents(self, handler: DatasetIndexToolCallbackHandler): handler.on_tool_end([]) class TestReturnRetrieverResourceInfo: - def test_publish_called(self, handler, mock_queue_manager, mocker): + def test_publish_called(self, handler: DatasetIndexToolCallbackHandler, mock_queue_manager, mocker: MockerFixture): mock_event = mocker.patch("core.callback_handler.index_tool_callback_handler.QueueRetrieverResourcesEvent") resources = [mocker.Mock()] diff --git a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py index 131fb006ed..5b53c5965c 100644 --- a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py +++ 
b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, call import pytest +from pytest_mock import MockerFixture from core.callback_handler.workflow_tool_callback_handler import ( DifyWorkflowCallbackHandler, @@ -26,13 +27,13 @@ def handler(): @pytest.fixture -def mock_print_text(mocker): +def mock_print_text(mocker: MockerFixture): """Mock print_text to avoid real stdout printing.""" return mocker.patch("core.callback_handler.workflow_tool_callback_handler.print_text") class TestDifyWorkflowCallbackHandler: - def test_on_tool_execution_single_output_success(self, handler, mock_print_text): + def test_on_tool_execution_single_output_success(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "test_tool" tool_inputs = {"a": 1} @@ -62,7 +63,7 @@ class TestDifyWorkflowCallbackHandler: ] ) - def test_on_tool_execution_multiple_outputs(self, handler, mock_print_text): + def test_on_tool_execution_multiple_outputs(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "multi_tool" outputs = [ @@ -83,7 +84,7 @@ class TestDifyWorkflowCallbackHandler: assert results == outputs assert mock_print_text.call_count == 4 * len(outputs) - def test_on_tool_execution_empty_iterable(self, handler, mock_print_text): + def test_on_tool_execution_empty_iterable(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "empty_tool" @@ -108,7 +109,9 @@ class TestDifyWorkflowCallbackHandler: ("not_iterable", AttributeError), ], ) - def test_on_tool_execution_invalid_outputs_type(self, handler, invalid_outputs, expected_exception): + def test_on_tool_execution_invalid_outputs_type( + self, handler: DifyWorkflowCallbackHandler, invalid_outputs, expected_exception + ): # Arrange tool_name = "invalid_tool" @@ -122,7 +125,7 @@ class TestDifyWorkflowCallbackHandler: ) ) - def test_on_tool_execution_long_json_truncation(self, 
handler, mock_print_text): + def test_on_tool_execution_long_json_truncation(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "long_json_tool" long_json = "x" * 1500 @@ -144,7 +147,7 @@ class TestDifyWorkflowCallbackHandler: color="blue", ) - def test_on_tool_execution_model_dump_json_exception(self, handler, mock_print_text): + def test_on_tool_execution_model_dump_json_exception(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "exception_tool" bad_message = MagicMock() @@ -163,7 +166,9 @@ class TestDifyWorkflowCallbackHandler: # Ensure first two prints happened before failure assert mock_print_text.call_count >= 2 - def test_on_tool_execution_none_message_id_and_trace_manager(self, handler, mock_print_text): + def test_on_tool_execution_none_message_id_and_trace_manager( + self, handler: DifyWorkflowCallbackHandler, mock_print_text + ): # Arrange tool_name = "optional_params_tool" message = DummyToolInvokeMessage('{"data": "ok"}') diff --git a/api/tests/unit_tests/core/datasource/test_datasource_manager.py b/api/tests/unit_tests/core/datasource/test_datasource_manager.py index deeac49bbc..8842d678c7 100644 --- a/api/tests/unit_tests/core/datasource/test_datasource_manager.py +++ b/api/tests/unit_tests/core/datasource/test_datasource_manager.py @@ -2,6 +2,7 @@ import types from collections.abc import Generator import pytest +from pytest_mock import MockerFixture from contexts.wrapper import RecyclableContextVar from core.datasource.datasource_manager import DatasourceManager @@ -37,7 +38,7 @@ def _invalidate_recyclable_contextvars() -> None: RecyclableContextVar.increment_thread_recycles() -def test_get_icon_url_calls_runtime(mocker): +def test_get_icon_url_calls_runtime(mocker: MockerFixture): fake_runtime = mocker.Mock() fake_runtime.get_icon_url.return_value = "https://icon" mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=fake_runtime) @@ -52,7 +53,7 @@ def 
test_get_icon_url_calls_runtime(mocker): DatasourceManager.get_datasource_runtime.assert_called_once() -def test_get_datasource_runtime_delegates_to_provider_controller(mocker): +def test_get_datasource_runtime_delegates_to_provider_controller(mocker: MockerFixture): provider_controller = mocker.Mock() provider_controller.get_datasource.return_value = object() mocker.patch.object(DatasourceManager, "get_datasource_plugin_provider", return_value=provider_controller) @@ -114,7 +115,7 @@ def test_get_datasource_plugin_provider_creates_controller_and_caches(mocker, da assert ctrl_cls.call_count == 1 -def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker): +def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker: MockerFixture): _invalidate_recyclable_contextvars() mocker.patch( "core.datasource.datasource_manager.PluginDatasourceManager.fetch_datasource_provider", @@ -129,7 +130,7 @@ def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mock ) -def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): +def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -145,7 +146,7 @@ def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): ) -def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): +def test_get_datasource_plugin_provider_raises_when_controller_none(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -165,7 +166,7 @@ def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): ) -def test_stream_online_results_yields_messages_online_document(mocker): +def 
test_stream_online_results_yields_messages_online_document(mocker: MockerFixture): # stub runtime to yield a text message def _doc_messages(**_): yield from _gen_messages_text_only("hello") @@ -195,7 +196,7 @@ def test_stream_online_results_yields_messages_online_document(mocker): assert msgs[0].message.text == "hello" -def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker): +def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -229,7 +230,7 @@ def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_do assert final_value == {} -def test_stream_online_results_raises_when_missing_params(mocker): +def test_stream_online_results_raises_when_missing_params(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -279,7 +280,7 @@ def test_stream_online_results_raises_when_missing_params(mocker): ) -def test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker): +def test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -313,7 +314,7 @@ def test_stream_online_results_yields_messages_and_returns_empty_dict_online_dri assert final_value == {} -def test_stream_online_results_raises_for_unsupported_stream_type(mocker): +def test_stream_online_results_raises_for_unsupported_stream_type(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=mocker.Mock()) mocker.patch( "core.datasource.datasource_manager.DatasourceProviderService.get_datasource_credentials", @@ -337,7 +338,7 @@ def test_stream_online_results_raises_for_unsupported_stream_type(mocker): ) -def 
test_stream_node_events_emits_events_online_document(mocker): +def test_stream_node_events_emits_events_online_document(mocker: MockerFixture): # make manager's low-level stream produce TEXT only mocker.patch.object( DatasourceManager, @@ -370,7 +371,7 @@ def test_stream_node_events_emits_events_online_document(mocker): assert events[-1].node_run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED -def test_stream_node_events_builds_file_and_variables_from_messages(mocker): +def test_stream_node_events_builds_file_and_variables_from_messages(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -478,7 +479,7 @@ def test_stream_node_events_builds_file_and_variables_from_messages(mocker): assert events[-1].node_run_result.outputs["x"] == 1 -def test_stream_node_events_raises_when_toolfile_missing(mocker): +def test_stream_node_events_raises_when_toolfile_missing(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -526,7 +527,7 @@ def test_stream_node_events_raises_when_toolfile_missing(mocker): ) -def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker): +def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) file_in = File( @@ -580,7 +581,7 @@ def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(moc assert completed.node_run_result.outputs["datasource_type"] == DatasourceProviderType.ONLINE_DRIVE -def test_stream_node_events_skips_file_build_for_non_online_types(mocker): +def test_stream_node_events_skips_file_build_for_non_online_types(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", 
return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -620,7 +621,7 @@ def test_stream_node_events_skips_file_build_for_non_online_types(mocker): assert events[-1].node_run_result.outputs["file"] is None -def test_get_upload_file_by_id_builds_file(mocker): +def test_get_upload_file_by_id_builds_file(mocker: MockerFixture): # fake UploadFile row fake_row = types.SimpleNamespace( id="fid", @@ -654,7 +655,7 @@ def test_get_upload_file_by_id_builds_file(mocker): assert f.storage_key == "k" -def test_get_upload_file_by_id_raises_when_missing(mocker): +def test_get_upload_file_by_id_raises_when_missing(mocker: MockerFixture): class _S: def __enter__(self): return self diff --git a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py index 399b531205..9c1cbe82a0 100644 --- a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py +++ b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py @@ -1,11 +1,12 @@ import httpx import pytest +from pytest_mock import MockerFixture from core.extension.api_based_extension_requestor import APIBasedExtensionRequestor from models.api_based_extension import APIBasedExtensionPoint -def test_request_success(mocker): +def test_request_success(mocker: MockerFixture): # Mock httpx.Client and its context manager mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value @@ -28,7 +29,7 @@ def test_request_success(mocker): ) -def test_request_with_ssrf_proxy(mocker): +def test_request_with_ssrf_proxy(mocker: MockerFixture): # Mock dify_config mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", "https://proxy:8081") @@ -59,7 +60,7 @@ def test_request_with_ssrf_proxy(mocker): assert mock_transport.call_count == 2 -def test_request_with_only_one_proxy_config(mocker): +def 
test_request_with_only_one_proxy_config(mocker: MockerFixture): # Mock dify_config with only one proxy mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", None) @@ -84,7 +85,7 @@ def test_request_with_only_one_proxy_config(mocker): assert kwargs.get("mounts") is None -def test_request_timeout(mocker): +def test_request_timeout(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -95,7 +96,7 @@ def test_request_timeout(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_connection_error(mocker): +def test_request_connection_error(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -106,7 +107,7 @@ def test_request_connection_error(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code(mocker): +def test_request_error_status_code(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -121,7 +122,7 @@ def test_request_error_status_code(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code_long_content(mocker): +def test_request_error_status_code_long_content(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) diff --git a/api/tests/unit_tests/core/helper/test_creators.py b/api/tests/unit_tests/core/helper/test_creators.py index df67d3f513..8750f6d907 100644 --- a/api/tests/unit_tests/core/helper/test_creators.py +++ b/api/tests/unit_tests/core/helper/test_creators.py @@ -8,7 +8,7 @@ from yarl import URL 
@pytest.fixture(autouse=True) -def _patch_creators_url(monkeypatch): +def _patch_creators_url(monkeypatch: pytest.MonkeyPatch): """Patch the module-level creators_platform_api_url for all tests.""" monkeypatch.setattr( "core.helper.creators.creators_platform_api_url", diff --git a/api/tests/unit_tests/core/ops/test_base_trace_instance.py b/api/tests/unit_tests/core/ops/test_base_trace_instance.py index ac65d13454..15a2af17ca 100644 --- a/api/tests/unit_tests/core/ops/test_base_trace_instance.py +++ b/api/tests/unit_tests/core/ops/test_base_trace_instance.py @@ -18,7 +18,7 @@ class ConcreteTraceInstance(BaseTraceInstance): @pytest.fixture -def mock_db_session(monkeypatch): +def mock_db_session(monkeypatch: pytest.MonkeyPatch): mock_session = MagicMock(spec=Session) mock_session.__enter__.return_value = mock_session mock_session.__exit__.return_value = None diff --git a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py index beb99f92cd..33a3293682 100644 --- a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py +++ b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py @@ -203,7 +203,7 @@ class DummySessionContext: @pytest.fixture(autouse=True) -def patch_provider_map(monkeypatch): +def patch_provider_map(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({"dummy": FAKE_PROVIDER_ENTRY}) ) @@ -212,7 +212,7 @@ def patch_provider_map(monkeypatch): @pytest.fixture(autouse=True) -def patch_timer_and_current_app(monkeypatch): +def patch_timer_and_current_app(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.ops.ops_trace_manager.threading.Timer", DummyTimer) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_queue", queue.Queue()) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_timer", None) @@ -227,12 +227,12 @@ def patch_timer_and_current_app(monkeypatch): @pytest.fixture(autouse=True) -def 
patch_sqlalchemy_session(monkeypatch): +def patch_sqlalchemy_session(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.ops.ops_trace_manager.Session", DummySessionContext) @pytest.fixture -def encryption_mocks(monkeypatch): +def encryption_mocks(monkeypatch: pytest.MonkeyPatch): encrypt_mock = MagicMock(side_effect=lambda tenant, value: f"enc-{value}") batch_decrypt_mock = MagicMock(side_effect=lambda tenant, values: [f"dec-{value}" for value in values]) obfuscate_mock = MagicMock(side_effect=lambda value: f"ob-{value}") @@ -243,7 +243,7 @@ def encryption_mocks(monkeypatch): @pytest.fixture -def mock_db(monkeypatch): +def mock_db(monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.scalars.return_value.all.return_value = ["chat"] db_mock = MagicMock() @@ -254,7 +254,7 @@ def mock_db(monkeypatch): @pytest.fixture -def workflow_repo_fixture(monkeypatch): +def workflow_repo_fixture(monkeypatch: pytest.MonkeyPatch): repo = MagicMock() repo.get_workflow_run_by_id_without_tenant.return_value = make_workflow_run() monkeypatch.setattr(TraceTask, "_get_workflow_run_repo", classmethod(lambda cls: repo)) @@ -340,13 +340,13 @@ def test_get_ops_trace_instance_handles_none_app(mock_db): assert OpsTraceManager.get_ops_trace_instance("app-id") is None -def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch): +def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": False})) mock_db.get.return_value = app assert OpsTraceManager.get_ops_trace_instance("app-id") is None -def test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch): +def test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": True, "tracing_provider": "missing"})) mock_db.get.return_value = app 
monkeypatch.setattr("core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({})) @@ -388,7 +388,7 @@ def test_get_app_config_through_message_id_app_model_config(mock_db): assert result.id == "cfg" -def test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch): +def test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): mock_db.get.return_value = None with pytest.raises(ValueError, match="Invalid tracing provider"): OpsTraceManager.update_app_tracing_config("app", True, "bad") @@ -421,7 +421,7 @@ def test_get_app_tracing_config_returns_payload(mock_db): assert OpsTraceManager.get_app_tracing_config("app-id", mock_db) == payload -def test_check_and_project_helpers(monkeypatch): +def test_check_and_project_helpers(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.provider_config_map", FakeProviderMap( @@ -449,7 +449,7 @@ def test_check_and_project_helpers(monkeypatch): assert OpsTraceManager.get_trace_config_project_url({}, "dummy") == "url" -def test_trace_task_conversation_and_extract(monkeypatch): +def test_trace_task_conversation_and_extract(monkeypatch: pytest.MonkeyPatch): task = TraceTask(trace_type=TraceTaskName.CONVERSATION_TRACE, message_id="msg") assert task.conversation_trace(foo="bar") == {"foo": "bar"} assert task._extract_streaming_metrics(make_message_data(message_metadata="not json")) == {} @@ -525,7 +525,7 @@ def test_extract_streaming_metrics_invalid_json(): assert task._extract_streaming_metrics(fake_message) == {} -def test_trace_queue_manager_add_and_collect(monkeypatch): +def test_trace_queue_manager_add_and_collect(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -536,7 +536,7 @@ def test_trace_queue_manager_add_and_collect(monkeypatch): assert tasks == [task] -def test_trace_queue_manager_run_invokes_send(monkeypatch): +def 
test_trace_queue_manager_run_invokes_send(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -556,7 +556,7 @@ def test_trace_queue_manager_run_invokes_send(monkeypatch): assert called["tasks"] == [task] -def test_trace_queue_manager_send_to_celery(monkeypatch): +def test_trace_queue_manager_send_to_celery(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) diff --git a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py index a4903054e0..13cf01651e 100644 --- a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py +++ b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py @@ -19,7 +19,7 @@ import pytest @pytest.fixture -def trace_queue_manager_and_task(monkeypatch): +def trace_queue_manager_and_task(monkeypatch: pytest.MonkeyPatch): """Fixture to provide TraceQueueManager and TraceTask with delayed imports.""" module_name = "core.ops.ops_trace_manager" if module_name not in sys.modules: diff --git a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py index 1537ffacf5..d8843f0eeb 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.request import PluginInvokeContext from core.plugin.impl.agent import PluginAgentClient @@ -15,7 +17,7 @@ def _agent_provider(name: str = "agent") -> SimpleNamespace: class TestPluginAgentClient: - def test_fetch_agent_strategy_providers(self, mocker): + def test_fetch_agent_strategy_providers(self, mocker: MockerFixture): client = PluginAgentClient() provider = 
_agent_provider("remote") @@ -43,7 +45,7 @@ class TestPluginAgentClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.strategies[0].identity.provider == "org/plugin/remote" - def test_fetch_agent_strategy_provider(self, mocker): + def test_fetch_agent_strategy_provider(self, mocker: MockerFixture): client = PluginAgentClient() provider = _agent_provider("provider") @@ -63,7 +65,7 @@ class TestPluginAgentClient: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.strategies[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks_and_passes_context(self, mocker): + def test_invoke_merges_chunks_and_passes_context(self, mocker: MockerFixture): client = PluginAgentClient() stream_mock = mocker.patch.object( client, "_request_with_plugin_daemon_response_stream", return_value=iter(["raw"]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py index 5f564062d5..c2cce5d691 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py @@ -1,12 +1,13 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.plugin.impl.asset import PluginAssetManager class TestPluginAssetManager: - def test_fetch_asset_success(self, mocker): + def test_fetch_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"asset-bytes") request_mock = mocker.patch.object(manager, "_request", return_value=response) @@ -16,14 +17,14 @@ class TestPluginAssetManager: assert result == b"asset-bytes" request_mock.assert_called_once_with(method="GET", path="plugin/tenant-1/asset/asset-1") - def test_fetch_asset_not_found_raises(self, mocker): + def test_fetch_asset_not_found_raises(self, mocker: MockerFixture): manager = 
PluginAssetManager() mocker.patch.object(manager, "_request", return_value=MagicMock(status_code=404, content=b"")) with pytest.raises(ValueError, match="can not found asset asset-1"): manager.fetch_asset("tenant-1", "asset-1") - def test_extract_asset_success(self, mocker): + def test_extract_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"file-content") request_mock = mocker.patch.object(manager, "_request", return_value=response) @@ -37,7 +38,7 @@ class TestPluginAssetManager: params={"plugin_unique_identifier": "org/plugin:1", "file_path": "README.md"}, ) - def test_extract_asset_not_found_raises(self, mocker): + def test_extract_asset_not_found_raises(self, mocker: MockerFixture): manager = PluginAssetManager() mocker.patch.object(manager, "_request", return_value=MagicMock(status_code=404, content=b"")) diff --git a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py index 23894bd417..b154f056ca 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import MockerFixture from core.plugin.endpoint.exc import EndpointSetupFailedError from core.plugin.entities.plugin_daemon import PluginDaemonInnerError @@ -39,7 +40,7 @@ class _StreamContext: class TestBasePluginClientImpl: - def test_inject_trace_headers(self, mocker): + def test_inject_trace_headers(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch("core.plugin.impl.base.dify_config.ENABLE_OTEL", True) trace_header = "00-abc-xyz-01" @@ -54,7 +55,7 @@ class TestBasePluginClientImpl: client._inject_trace_headers(headers_with_existing) assert headers_with_existing["TraceParent"] == "exists" - def test_stream_request_handles_data_lines_and_dict_payload(self, mocker): + def 
test_stream_request_handles_data_lines_and_dict_payload(self, mocker: MockerFixture): client = BasePluginClient() stream_mock = mocker.patch( "httpx.Client.stream", @@ -66,14 +67,14 @@ class TestBasePluginClientImpl: assert result == ["hello", "world"] assert stream_mock.call_args.kwargs["data"] == {"k": "v"} - def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker): + def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_request", side_effect=RuntimeError("boom")) with pytest.raises(ValueError, match="Failed to request plugin daemon"): client._request_with_plugin_daemon_response("GET", "plugin/tenant/path", bool) - def test_request_with_plugin_daemon_response_applies_transformer(self, mocker): + def test_request_with_plugin_daemon_response_applies_transformer(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_request", return_value=_ResponseStub({"code": 0, "message": "", "data": True})) @@ -88,14 +89,14 @@ class TestBasePluginClientImpl: assert result is True assert transformed == {"code": 0, "message": "", "data": True} - def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"error":"bad-line"}'])) with pytest.raises(ValueError, match="bad-line"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object( client, "_stream_request", 
return_value=iter(['{"code":-500,"message":"not-json","data":null}']) @@ -105,14 +106,14 @@ class TestBasePluginClientImpl: list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) assert exc_info.value.message == "not-json" - def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"code":-1,"message":"err","data":null}'])) with pytest.raises(ValueError, match="plugin daemon: err, code: -1"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"code":0,"message":"","data":null}'])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py index 4c5987d759..94723dcfe2 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.datasource.entities.datasource_entities import ( GetOnlineDocumentPageContentRequest, OnlineDriveBrowseFilesRequest, @@ -19,7 +21,7 @@ def _datasource_provider(name: str = "provider") -> SimpleNamespace: class TestPluginDatasourceManager: - def test_fetch_datasource_providers(self, mocker): + def test_fetch_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ 
-52,7 +54,7 @@ class TestPluginDatasourceManager: assert result[1].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_installed_datasource_providers(self, mocker): + def test_fetch_installed_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ -83,7 +85,7 @@ class TestPluginDatasourceManager: assert result[0].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_datasource_provider_local_and_remote(self, mocker): + def test_fetch_datasource_provider_local_and_remote(self, mocker: MockerFixture): manager = PluginDatasourceManager() local = manager.fetch_datasource_provider("tenant-1", "langgenius/file/file") @@ -113,7 +115,7 @@ class TestPluginDatasourceManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.datasources[0].identity.provider == "org/plugin/provider" - def test_get_website_crawl_streaming(self, mocker): + def test_get_website_crawl_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["crawl"]) @@ -132,7 +134,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_get_online_document_pages_streaming(self, mocker): + def test_get_online_document_pages_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["pages"]) @@ -151,7 +153,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def 
test_get_online_document_page_content_streaming(self, mocker): + def test_get_online_document_page_content_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["content"]) @@ -170,7 +172,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_online_drive_browse_files_streaming(self, mocker): + def test_online_drive_browse_files_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["browse"]) @@ -189,7 +191,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_online_drive_download_file_streaming(self, mocker): + def test_online_drive_download_file_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["download"]) @@ -208,14 +210,14 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker): + def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([SimpleNamespace(result=True)]) assert manager.validate_provider_credentials("tenant-1", "user-1", "provider", "org/plugin", {"k": "v"}) is True - def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker): + def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, 
"_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py index c80785aee0..05959207b1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py @@ -1,10 +1,12 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.impl.debugging import PluginDebuggingClient class TestPluginDebuggingClient: - def test_get_debugging_key(self, mocker): + def test_get_debugging_key(self, mocker: MockerFixture): client = PluginDebuggingClient() request_mock = mocker.patch.object( client, diff --git a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py index 4cf657a050..7a24cc01d1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py @@ -1,11 +1,12 @@ import pytest +from pytest_mock import MockerFixture from core.plugin.impl.endpoint import PluginEndpointClient from core.plugin.impl.exc import PluginDaemonInternalServerError class TestPluginEndpointClientImpl: - def test_create_endpoint(self, mocker): + def test_create_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -18,7 +19,7 @@ class TestPluginEndpointClientImpl: assert args[:3] == ("POST", "plugin/tenant-1/endpoint/setup", bool) assert kwargs["data"]["plugin_unique_identifier"] == "org/plugin:1" - def test_list_endpoints(self, mocker): + def test_list_endpoints(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -28,7 
+29,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list" assert request_mock.call_args.kwargs["params"] == {"page": 2, "page_size": 20} - def test_list_endpoints_for_single_plugin(self, mocker): + def test_list_endpoints_for_single_plugin(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -38,7 +39,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list/plugin" assert request_mock.call_args.kwargs["params"] == {"plugin_id": "org/plugin", "page": 1, "page_size": 10} - def test_update_endpoint(self, mocker): + def test_update_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -47,7 +48,7 @@ class TestPluginEndpointClientImpl: assert result is True assert request_mock.call_args.args[:3] == ("POST", "plugin/tenant-1/endpoint/update", bool) - def test_enable_and_disable_endpoint(self, mocker): + def test_enable_and_disable_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -58,7 +59,7 @@ class TestPluginEndpointClientImpl: assert calls[0].args[1] == "plugin/tenant-1/endpoint/enable" assert calls[1].args[1] == "plugin/tenant-1/endpoint/disable" - def test_delete_endpoint_idempotent_and_re_raise(self, mocker): + def test_delete_endpoint_idempotent_and_re_raise(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response") diff --git a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py index 8c6f1c6b7f..d99a8c114f 100644 --- 
a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py @@ -1,11 +1,13 @@ import json +from pytest_mock import MockerFixture + from core.plugin.impl import exc as exc_module from core.plugin.impl.exc import PluginDaemonError, PluginInvokeError class TestPluginImplExceptions: - def test_plugin_daemon_error_str_contains_request_id(self, mocker): + def test_plugin_daemon_error_str_contains_request_id(self, mocker: MockerFixture): mocker.patch("core.plugin.impl.exc.get_request_id", return_value="req-123") error = PluginDaemonError("bad") @@ -21,7 +23,7 @@ class TestPluginImplExceptions: assert "RateLimit" in friendly assert "too many" in friendly - def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker): + def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker: MockerFixture): err = PluginInvokeError("plain text") assert err._get_error_object() == {} @@ -32,7 +34,7 @@ class TestPluginImplExceptions: err2 = PluginInvokeError("plain text") assert err2.get_error_message() == "plain text" - def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker): + def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker: MockerFixture): adapter = mocker.patch.object(exc_module, "TypeAdapter") adapter.return_value.validate_json.side_effect = RuntimeError("invalid") diff --git a/api/tests/unit_tests/core/plugin/impl/test_model_client.py b/api/tests/unit_tests/core/plugin/impl/test_model_client.py index bcbebbb38b..6dc572310c 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_model_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_model_client.py @@ -4,13 +4,14 @@ import io from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.plugin.entities.plugin_daemon import PluginDaemonInnerError from core.plugin.impl.model import PluginModelClient class TestPluginModelClient: - def 
test_fetch_model_providers(self, mocker): + def test_fetch_model_providers(self, mocker: MockerFixture): client = PluginModelClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["provider-a"]) @@ -23,7 +24,7 @@ class TestPluginModelClient: ) assert request_mock.call_args.kwargs["params"] == {"page": 1, "page_size": 256} - def test_get_model_schema(self, mocker): + def test_get_model_schema(self, mocker: MockerFixture): client = PluginModelClient() schema = SimpleNamespace(name="schema") stream_mock = mocker.patch.object( @@ -45,7 +46,7 @@ class TestPluginModelClient: assert result is schema assert stream_mock.call_args.args[:2] == ("POST", "plugin/tenant-1/dispatch/model/schema") - def test_get_model_schema_empty_stream_returns_none(self, mocker): + def test_get_model_schema_empty_stream_returns_none(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -53,7 +54,7 @@ class TestPluginModelClient: assert result is None - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -77,7 +78,7 @@ class TestPluginModelClient: "plugin/tenant-1/dispatch/model/validate_provider_credentials", ) - def test_validate_provider_credentials_without_dict_update(self, mocker): + def test_validate_provider_credentials_without_dict_update(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -91,13 +92,13 @@ class TestPluginModelClient: assert result is False assert credentials == {"api_key": "same"} - def test_validate_provider_credentials_empty_returns_false(self, mocker): + def test_validate_provider_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, 
"_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.validate_provider_credentials("tenant-1", "user-1", "org/plugin:1", "provider-a", {}) is False - def test_validate_model_credentials(self, mocker): + def test_validate_model_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -123,7 +124,7 @@ class TestPluginModelClient: "plugin/tenant-1/dispatch/model/validate_model_credentials", ) - def test_validate_model_credentials_empty_returns_false(self, mocker): + def test_validate_model_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -132,7 +133,7 @@ class TestPluginModelClient: is False ) - def test_invoke_llm(self, mocker): + def test_invoke_llm(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk-1"]) @@ -160,7 +161,7 @@ class TestPluginModelClient: assert call_kwargs["data"]["data"]["stream"] is False assert call_kwargs["data"]["data"]["model_parameters"] == {"temperature": 0.1} - def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker: MockerFixture): client = PluginModelClient() def _boom(): @@ -182,7 +183,7 @@ class TestPluginModelClient: ) ) - def test_get_llm_num_tokens(self, mocker): + def test_get_llm_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -204,7 +205,7 @@ class TestPluginModelClient: assert result == 42 - def test_get_llm_num_tokens_empty_returns_zero(self, mocker): + def test_get_llm_num_tokens_empty_returns_zero(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", 
return_value=iter([])) @@ -213,7 +214,7 @@ class TestPluginModelClient: == 0 ) - def test_invoke_text_embedding(self, mocker): + def test_invoke_text_embedding(self, mocker: MockerFixture): client = PluginModelClient() embedding_result = SimpleNamespace(data=[[0.1, 0.2]]) mocker.patch.object( @@ -233,7 +234,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_text_embedding_empty_raises(self, mocker): + def test_invoke_text_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -242,7 +243,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "embedding-a", {}, ["hello"], "x" ) - def test_invoke_multimodal_embedding(self, mocker): + def test_invoke_multimodal_embedding(self, mocker: MockerFixture): client = PluginModelClient() embedding_result = SimpleNamespace(data=[[0.3, 0.4]]) mocker.patch.object( @@ -262,7 +263,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_multimodal_embedding_empty_raises(self, mocker): + def test_invoke_multimodal_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -271,7 +272,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "embedding-a", {}, [{"type": "image"}], "x" ) - def test_get_text_embedding_num_tokens(self, mocker): + def test_get_text_embedding_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -287,7 +288,7 @@ class TestPluginModelClient: 3, ] - def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker): + def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, 
"_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -298,7 +299,7 @@ class TestPluginModelClient: == [] ) - def test_invoke_rerank(self, mocker): + def test_invoke_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.9]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -318,14 +319,14 @@ class TestPluginModelClient: assert result is rerank_result - def test_invoke_rerank_empty_raises(self, mocker): + def test_invoke_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) with pytest.raises(ValueError, match="Failed to invoke rerank"): client.invoke_rerank("tenant-1", "user-1", "org/plugin:1", "provider-a", "rerank-a", {}, "q", ["doc-1"]) - def test_invoke_multimodal_rerank(self, mocker): + def test_invoke_multimodal_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.8]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -345,7 +346,7 @@ class TestPluginModelClient: assert result is rerank_result - def test_invoke_multimodal_rerank_empty_raises(self, mocker): + def test_invoke_multimodal_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -361,7 +362,7 @@ class TestPluginModelClient: [{"type": "image"}], ) - def test_invoke_tts(self, mocker): + def test_invoke_tts(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -384,7 +385,7 @@ class TestPluginModelClient: assert result == [b"hello", b"!"] - def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker: 
MockerFixture): client = PluginModelClient() def _boom(): @@ -396,7 +397,7 @@ class TestPluginModelClient: with pytest.raises(ValueError, match="tts error-400"): list(client.invoke_tts("tenant-1", "user-1", "org/plugin:1", "provider-a", "tts-a", {}, "hello", "alloy")) - def test_get_tts_model_voices(self, mocker): + def test_get_tts_model_voices(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -425,13 +426,13 @@ class TestPluginModelClient: assert result == [{"name": "Alloy", "value": "alloy"}, {"name": "Echo", "value": "echo"}] - def test_get_tts_model_voices_empty_returns_list(self, mocker): + def test_get_tts_model_voices_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.get_tts_model_voices("tenant-1", "user-1", "org/plugin:1", "provider-a", "tts-a", {}) == [] - def test_invoke_speech_to_text(self, mocker): + def test_invoke_speech_to_text(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -452,7 +453,7 @@ class TestPluginModelClient: assert result == "transcribed text" assert stream_mock.call_args.kwargs["data"]["data"]["file"] == "616263" - def test_invoke_speech_to_text_empty_raises(self, mocker): + def test_invoke_speech_to_text_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -461,7 +462,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "stt-a", {}, io.BytesIO(b"abc") ) - def test_invoke_moderation(self, mocker): + def test_invoke_moderation(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -482,7 +483,7 @@ class TestPluginModelClient: assert result is True assert stream_mock.call_args.kwargs["path"] == 
"plugin/tenant-1/dispatch/moderation/invoke" - def test_invoke_moderation_empty_raises(self, mocker): + def test_invoke_moderation_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py index 6fb4c99432..f6c9b1c669 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py +++ b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.impl.oauth import OAuthHandler @@ -25,7 +26,7 @@ def _build_request(body: bytes = b"payload") -> Request: class TestOAuthHandler: - def test_get_authorization_url(self, mocker): + def test_get_authorization_url(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -45,7 +46,7 @@ class TestOAuthHandler: assert response.authorization_url == "https://auth.example.com" assert stream_mock.call_count == 1 - def test_get_authorization_url_no_response_raises(self, mocker): + def test_get_authorization_url_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -59,7 +60,7 @@ class TestOAuthHandler: system_credentials={}, ) - def test_get_credentials(self, mocker): + def test_get_credentials(self, mocker: MockerFixture): handler = OAuthHandler() captured_data = {} @@ -85,7 +86,7 @@ class TestOAuthHandler: assert "raw_http_request" in captured_data["data"] assert stream_mock.call_count == 1 - def test_get_credentials_no_response_raises(self, mocker): + def test_get_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() 
mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -100,7 +101,7 @@ class TestOAuthHandler: request=_build_request(), ) - def test_refresh_credentials(self, mocker): + def test_refresh_credentials(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -121,7 +122,7 @@ class TestOAuthHandler: assert response.credentials == {"token": "new"} assert stream_mock.call_count == 1 - def test_refresh_credentials_no_response_raises(self, mocker): + def test_refresh_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py index 80cf46f9bb..3ae3cc18e4 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.plugin_daemon import CredentialType from core.plugin.impl.tool import PluginToolManager @@ -15,7 +17,7 @@ def _tool_provider(name: str = "provider") -> SimpleNamespace: class TestPluginToolManager: - def test_fetch_tool_providers(self, mocker): + def test_fetch_tool_providers(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("remote") mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", return_value={"resolved": True}) @@ -44,7 +46,7 @@ class TestPluginToolManager: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.tools[0].identity.provider == "org/plugin/remote" - def test_fetch_tool_provider(self, mocker): + def test_fetch_tool_provider(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("provider") 
mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", return_value={"resolved": True}) @@ -68,7 +70,7 @@ class TestPluginToolManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.tools[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks(self, mocker): + def test_invoke_merges_chunks(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object( manager, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk"]) @@ -92,7 +94,7 @@ class TestPluginToolManager: assert merge_mock.call_count == 1 assert stream_mock.call_args.kwargs["headers"]["X-Plugin-ID"] == "org/plugin" - def test_validate_credentials_paths(self, mocker): + def test_validate_credentials_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") @@ -108,7 +110,7 @@ class TestPluginToolManager: stream_mock.return_value = iter([]) assert manager.validate_datasource_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) is False - def test_get_runtime_parameters_paths(self, mocker): + def test_get_runtime_parameters_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") diff --git a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py index 76da51c2c8..811bb7e50d 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.entities.plugin_daemon import CredentialType @@ -62,7 +63,7 @@ def _subscription_call_kwargs(method_name: str) -> dict: class 
TestPluginTriggerClient: - def test_fetch_trigger_providers(self, mocker): + def test_fetch_trigger_providers(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("remote") @@ -89,7 +90,7 @@ class TestPluginTriggerClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.events[0].identity.provider == "org/plugin/remote" - def test_fetch_trigger_provider(self, mocker): + def test_fetch_trigger_provider(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("provider") @@ -108,7 +109,7 @@ class TestPluginTriggerClient: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.events[0].identity.provider == "org/plugin/provider" - def test_invoke_trigger_event(self, mocker): + def test_invoke_trigger_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -132,7 +133,7 @@ class TestPluginTriggerClient: assert result.variables == {"ok": True} assert stream_mock.call_count == 1 - def test_invoke_trigger_event_no_response_raises(self, mocker): + def test_invoke_trigger_event_no_response_raises(self, mocker: MockerFixture): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -150,7 +151,7 @@ class TestPluginTriggerClient: payload={"payload": 1}, ) - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response_stream") @@ -163,7 +164,7 @@ class TestPluginTriggerClient: ): client.validate_provider_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) - def test_dispatch_event(self, mocker): + def test_dispatch_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = 
mocker.patch.object( client, @@ -195,7 +196,7 @@ class TestPluginTriggerClient: ) @pytest.mark.parametrize("method_name", ["subscribe", "unsubscribe", "refresh"]) - def test_subscription_operations_success(self, mocker, method_name): + def test_subscription_operations_success(self, mocker: MockerFixture, method_name): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -217,7 +218,7 @@ class TestPluginTriggerClient: ("refresh", "No response received from plugin daemon for refresh"), ], ) - def test_subscription_operations_no_response(self, mocker, method_name, expected): + def test_subscription_operations_no_response(self, mocker: MockerFixture, method_name, expected): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) method = getattr(client, method_name) diff --git a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py index 3feb4159ad..2ed7c70ed9 100644 --- a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py +++ b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import BaseModel +from pytest_mock import MockerFixture from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig from core.plugin.backwards_invocation.app import PluginAppBackwardsInvocation @@ -41,7 +42,7 @@ class TestBaseBackwardsInvocation: class TestPluginAppBackwardsInvocation: - def test_fetch_app_info_workflow_path(self, mocker): + def test_fetch_app_info_workflow_path(self, mocker: MockerFixture): workflow = MagicMock() workflow.features_dict = {"feature": "v"} workflow.user_input_form.return_value = [{"name": "foo"}] @@ -57,7 +58,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"data": {"mapped": True}} mapper.assert_called_once_with(features_dict={"feature": "v"}, 
user_input_form=[{"name": "foo"}]) - def test_fetch_app_info_model_config_path(self, mocker): + def test_fetch_app_info_model_config_path(self, mocker: MockerFixture): model_config = MagicMock() model_config.to_dict.return_value = {"user_input_form": [{"name": "bar"}], "k": "v"} app = MagicMock(mode=AppMode.COMPLETION, app_model_config=model_config) @@ -81,7 +82,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.COMPLETION, "invoke_completion_app"), ], ) - def test_invoke_app_routes_by_mode(self, mocker, mode, route_method): + def test_invoke_app_routes_by_mode(self, mocker: MockerFixture, mode, route_method): app = MagicMock(mode=mode) user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -102,7 +103,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"routed": True} assert route.call_count == 1 - def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker): + def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker: MockerFixture): app = MagicMock(mode=AppMode.WORKFLOW) end_user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -127,7 +128,7 @@ class TestPluginAppBackwardsInvocation: get_or_create.assert_called_once_with(app) assert route.call_args.args[1] is end_user - def test_invoke_app_missing_query_for_chat_raises(self, mocker): + def test_invoke_app_missing_query_for_chat_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=MagicMock(mode=AppMode.CHAT)) mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -143,7 +144,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_app_unexpected_mode_raises(self, mocker): + def test_invoke_app_unexpected_mode_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=MagicMock(mode="other")) 
mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -166,7 +167,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.CHAT, "core.plugin.backwards_invocation.app.ChatAppGenerator.generate"), ], ) - def test_invoke_chat_app_agent_and_chat(self, mocker, mode, generator_path): + def test_invoke_chat_app_agent_and_chat(self, mocker: MockerFixture, mode, generator_path): app = MagicMock(mode=mode, workflow=None) spy = mocker.patch(generator_path, return_value={"result": "ok"}) @@ -183,7 +184,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"result": "ok"} assert spy.call_count == 1 - def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker): + def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -242,7 +243,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_workflow_app_injects_pause_state_config(self, mocker): + def test_invoke_workflow_app_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -284,7 +285,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_completion_app(self, mocker): + def test_invoke_completion_app(self, mocker: MockerFixture): spy = mocker.patch( "core.plugin.backwards_invocation.app.CompletionAppGenerator.generate", return_value={"ok": 1} ) @@ -295,7 +296,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"ok": 1} assert spy.call_count == 1 - def test_get_user_returns_end_user(self, mocker): + def test_get_user_returns_end_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [MagicMock(id="end-user")] session_ctx = MagicMock() @@ -307,7 +308,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "end-user" - def test_get_user_falls_back_to_account_user(self, 
mocker): + def test_get_user_falls_back_to_account_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, MagicMock(id="account-user")] session_ctx = MagicMock() @@ -319,7 +320,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "account-user" - def test_get_user_raises_when_user_not_found(self, mocker): + def test_get_user_raises_when_user_not_found(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, None] session_ctx = MagicMock() @@ -331,21 +332,21 @@ class TestPluginAppBackwardsInvocation: with pytest.raises(ValueError, match="user not found"): PluginAppBackwardsInvocation._get_user("uid") - def test_get_app_returns_app(self, mocker): + def test_get_app_returns_app(self, mocker: MockerFixture): app_obj = MagicMock(id="app") db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=app_obj))) mocker.patch("core.plugin.backwards_invocation.app.db", db) assert PluginAppBackwardsInvocation._get_app("app", "tenant") is app_obj - def test_get_app_raises_when_missing(self, mocker): + def test_get_app_raises_when_missing(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=None))) mocker.patch("core.plugin.backwards_invocation.app.db", db) with pytest.raises(ValueError, match="app not found"): PluginAppBackwardsInvocation._get_app("app", "tenant") - def test_get_app_raises_when_query_fails(self, mocker): + def test_get_app_raises_when_query_fails(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(side_effect=RuntimeError("db down")))) mocker.patch("core.plugin.backwards_invocation.app.db", db) diff --git a/api/tests/unit_tests/core/plugin/test_plugin_entities.py b/api/tests/unit_tests/core/plugin/test_plugin_entities.py index f1c4c7e700..deac0ba1da 100644 --- a/api/tests/unit_tests/core/plugin/test_plugin_entities.py +++ 
b/api/tests/unit_tests/core/plugin/test_plugin_entities.py @@ -5,6 +5,7 @@ from enum import StrEnum import pytest from flask import Response from pydantic import ValidationError +from pytest_mock import MockerFixture from core.plugin.entities.endpoint import EndpointEntityWithInstance from core.plugin.entities.marketplace import MarketplacePluginDeclaration, MarketplacePluginSnapshot @@ -34,7 +35,7 @@ from graphon.model_runtime.entities.message_entities import ( class TestEndpointEntity: - def test_endpoint_entity_with_instance_renders_url(self, mocker): + def test_endpoint_entity_with_instance_renders_url(self, mocker: MockerFixture): mocker.patch("core.plugin.entities.endpoint.dify_config.ENDPOINT_URL_TEMPLATE", "https://dify.test/{hook_id}") now = datetime.datetime.now(datetime.UTC) diff --git a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py index 1b114b369a..1f46634b89 100644 --- a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py +++ b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py @@ -1,5 +1,7 @@ from uuid import uuid4 +from pytest_mock import MockerFixture + from constants import UUID_NIL from core.prompt.utils.extract_thread_messages import extract_thread_messages from core.prompt.utils.get_thread_messages_length import get_thread_messages_length @@ -103,7 +105,7 @@ def test_extract_thread_messages_breaks_when_parent_is_none(): assert result[0].id == id2 -def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): +def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer=""), # newest generated message should be excluded @@ -119,7 +121,7 @@ def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): mock_scalars.assert_called_once() -def 
test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker): +def test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer="latest-answer"), diff --git a/api/tests/unit_tests/core/prompt/test_prompt_transform.py b/api/tests/unit_tests/core/prompt/test_prompt_transform.py index 5308c8e7b3..3d71e73496 100644 --- a/api/tests/unit_tests/core/prompt/test_prompt_transform.py +++ b/api/tests/unit_tests/core/prompt/test_prompt_transform.py @@ -209,7 +209,7 @@ class TestPromptTransform: assert result == ["only"] memory.get_history_prompt_messages.assert_called_with(max_token_limit=10, message_limit=None) - def test_append_chat_histories_extends_prompt_messages(self, monkeypatch): + def test_append_chat_histories_extends_prompt_messages(self, monkeypatch: pytest.MonkeyPatch): transform = PromptTransform() memory = MagicMock() memory_config = SimpleNamespace(window=SimpleNamespace(enabled=False, size=None)) diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py index 1e91c2dd88..e233bd2ef0 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py @@ -67,7 +67,7 @@ def _dataset(dataset_keyword_table=None, keyword_number=None): @pytest.fixture -def patched_runtime(monkeypatch): +def patched_runtime(monkeypatch: pytest.MonkeyPatch): session = MagicMock() db = SimpleNamespace(session=session) storage = MagicMock() @@ -151,7 +151,7 @@ def test_add_texts_without_keywords_list_always_uses_extractor(monkeypatch, patc assert set(keyword._update_segment_keywords.call_args.args[2]) == {"from-extractor"} -def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch): +def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch: 
pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value=None)) @@ -308,7 +308,7 @@ def test_add_and_delete_ids_from_keyword_table_helpers(): assert deleted["kw2"] == {"node-2"} -def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch): +def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) handler = MagicMock() handler.extract_keywords.return_value = ["kw-a", "kw-b"] @@ -350,7 +350,7 @@ def test_update_segment_keywords_updates_when_segment_exists(monkeypatch, patche patched_runtime.session.commit.assert_not_called() -def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): +def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value={})) monkeypatch.setattr(keyword, "_update_segment_keywords", MagicMock()) @@ -365,7 +365,7 @@ def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): keyword._save_dataset_keyword_table.assert_called_once() -def test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch): +def test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table(), keyword_number=2)) handler = MagicMock() handler.extract_keywords.return_value = {"auto"} diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py index a4586c141b..c8ee75bf43 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py +++ 
b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py @@ -2,6 +2,8 @@ import sys import types from types import SimpleNamespace +import pytest + from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler from core.rag.datasource.keyword.jieba.stopwords import STOPWORDS @@ -38,7 +40,7 @@ def _install_fake_jieba_modules( monkeypatch.delitem(sys.modules, "jieba.analyse.tfidf", raising=False) -def test_init_uses_existing_default_tfidf(monkeypatch): +def test_init_uses_existing_default_tfidf(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") default_tfidf = _DummyTFIDF() analyse_module.default_tfidf = default_tfidf @@ -51,7 +53,7 @@ def test_init_uses_existing_default_tfidf(monkeypatch): assert handler._tfidf.stop_words == STOPWORDS -def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): +def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -67,7 +69,7 @@ def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): +def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -85,7 +87,7 @@ def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): +def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None _install_fake_jieba_modules(monkeypatch, analyse_module) @@ 
-96,7 +98,7 @@ def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): assert fallback_keywords == ["two"] -def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): +def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") _install_fake_jieba_modules(monkeypatch, analyse_module, jieba_attrs={"lcut": lambda _: ["x", "x", "y"]}) @@ -105,7 +107,7 @@ def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): assert tfidf.extract_tags("ignored", topK=1) == ["x"] -def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch): +def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") _install_fake_jieba_modules( monkeypatch, diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py index 0d969a3270..e1765b17cb 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py @@ -10,7 +10,7 @@ from core.rag.datasource.keyword.keyword_type import KeyWordType from core.rag.models.document import Document -def test_get_keyword_factory_returns_jieba_factory(monkeypatch): +def test_get_keyword_factory_returns_jieba_factory(monkeypatch: pytest.MonkeyPatch): fake_module = types.ModuleType("core.rag.datasource.keyword.jieba.jieba") class FakeJieba: @@ -27,7 +27,7 @@ def test_get_keyword_factory_raises_for_unsupported_type(): Keyword.get_keyword_factory("unsupported") -def test_keyword_initialization_uses_configured_factory(monkeypatch): +def test_keyword_initialization_uses_configured_factory(monkeypatch: pytest.MonkeyPatch): dataset = SimpleNamespace(id="dataset-1") fake_processor = MagicMock() diff --git 
a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py index b0ecad4d0c..d38213dd89 100644 --- a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py +++ b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py @@ -182,7 +182,7 @@ class TestRetrievalServiceInternals: app.app_context.return_value.__exit__.return_value = False return app - def test_retrieve_with_attachment_ids_only(self, monkeypatch, internal_dataset): + def test_retrieve_with_attachment_ids_only(self, monkeypatch: pytest.MonkeyPatch, internal_dataset): with ( patch("core.rag.datasource.retrieval_service.RetrievalService._get_dataset", return_value=internal_dataset), patch("core.rag.datasource.retrieval_service.RetrievalService._retrieve") as mock_retrieve, @@ -699,7 +699,9 @@ class TestRetrievalServiceInternals: assert RetrievalService.format_retrieval_documents(documents) == [] - def test_format_retrieval_documents_with_parent_child_summary_and_attachments(self, monkeypatch): + def test_format_retrieval_documents_with_parent_child_summary_and_attachments( + self, monkeypatch: pytest.MonkeyPatch + ): dataset_doc_parent = SimpleNamespace( id="doc-parent", doc_form=IndexStructureType.PARENT_CHILD_INDEX, @@ -877,7 +879,7 @@ class TestRetrievalServiceInternals: assert result_by_segment_id["segment-parent-summary"].summary == "summary for parent" assert result_by_segment_id["segment-parent-summary"].child_chunks == [] - def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch): + def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch: pytest.MonkeyPatch): rollback = Mock() monkeypatch.setattr(retrieval_service_module.db.session, "rollback", rollback) monkeypatch.setattr(retrieval_service_module.db.session, "scalars", Mock(side_effect=RuntimeError("db error"))) @@ -936,7 +938,7 @@ class TestRetrievalServiceInternals: 
future_ok.cancel.assert_called() def test_retrieve_internal_raises_value_error_when_exceptions_exist( - self, monkeypatch, internal_dataset, internal_flask_app + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) @@ -958,7 +960,9 @@ class TestRetrievalServiceInternals: query="query", ) - def test_retrieve_internal_hybrid_weighted_attachment_flow(self, monkeypatch, internal_dataset, internal_flask_app): + def test_retrieve_internal_hybrid_weighted_attachment_flow( + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app + ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) monkeypatch.setattr( diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py index 7b6ee97f1c..067159398d 100644 --- a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py @@ -102,7 +102,9 @@ def test_gen_index_struct_dict(vector_factory_module): ("HOLOGRES", "dify_vdb_hologres.hologres_vector", "HologresVectorFactory"), ], ) -def test_get_vector_factory_supported(vector_factory_module, monkeypatch, vector_type, module_path, class_name): +def test_get_vector_factory_supported( + vector_factory_module, monkeypatch: pytest.MonkeyPatch, vector_type, module_path, class_name +): expected_cls = _register_fake_factory_module(monkeypatch, module_path, class_name) result_cls = vector_factory_module.Vector.get_vector_factory(getattr(vector_factory_module.VectorType, vector_type)) @@ -119,7 +121,7 @@ class _PluginChromaFactory: """Stub used only for entry-point override test.""" -def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, 
monkeypatch): +def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, monkeypatch: pytest.MonkeyPatch): from importlib.metadata import EntryPoint from core.rag.datasource.vdb import vector_backend_registry as reg @@ -171,7 +173,7 @@ def test_vector_init_uses_default_and_custom_attributes(vector_factory_module): assert default_vector._vector_processor == "processor" -def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch): +def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch: pytest.MonkeyPatch): """``Vector(dataset)`` must not transitively call ``ModelManager`` during construction. The real embedding model should only be materialized on the first ``embed_*`` call (i.e. create / search paths) so cleanup paths @@ -214,7 +216,7 @@ def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_m inner_model.embed_documents.assert_called_once_with(["world"]) -def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch): +def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch: pytest.MonkeyPatch): calls = {"vector_type": None, "init_args": None} class _Factory: @@ -242,7 +244,7 @@ def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeyp assert calls["init_args"] == (vector._dataset, ["doc_id"], "embeddings") -def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch): +def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Expr: def __eq__(self, _other): return "expr" @@ -279,7 +281,7 @@ def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch) assert calls["vector_type"] == vector_factory_module.VectorType.TIDB_ON_QDRANT -def test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch): +def 
test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE", None) monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE_WHITELIST_ENABLE", False) @@ -343,7 +345,7 @@ def test_create_skips_empty_text_documents_before_embedding(vector_factory_modul vector._vector_processor.create.assert_not_called() -def test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch): +def test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Field: def in_(self, value): return value @@ -484,7 +486,7 @@ def test_vector_delegation_methods(vector_factory_module): vector._vector_processor.delete_by_metadata_field.assert_called_once_with("doc_id", "doc-1") -def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch): +def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch: pytest.MonkeyPatch): vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) vector._embeddings = MagicMock() vector._vector_processor = MagicMock() @@ -507,7 +509,7 @@ def test_search_by_file_handles_missing_and_existing_upload(vector_factory_modul assert payload["file_id"] == "file-2" -def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch): +def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch: pytest.MonkeyPatch): delete_mock = MagicMock() redis_delete = MagicMock() monkeypatch.setattr(vector_factory_module.redis_client, "delete", redis_delete) @@ -526,7 +528,7 @@ def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, redis_delete.assert_not_called() -def test_get_embeddings_builds_cache_embedding(vector_factory_module, monkeypatch): +def test_get_embeddings_builds_cache_embedding(vector_factory_module, 
monkeypatch: pytest.MonkeyPatch): model_manager = MagicMock() model_manager.get_model_instance.return_value = "model-instance" diff --git a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py index e6a06f163e..2e1c5715c2 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py @@ -39,7 +39,7 @@ class TestCSVExtractor: with pytest.raises(ValueError, match="Source column 'missing_col' not found"): extractor.extract() - def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch): + def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=False) def raise_decode(*args, **kwargs): @@ -50,7 +50,7 @@ class TestCSVExtractor: with pytest.raises(RuntimeError, match="Error loading dummy.csv"): extractor.extract() - def test_extract_autodetect_encoding_success(self, monkeypatch): + def test_extract_autodetect_encoding_success(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) attempted_encodings: list[str | None] = [] @@ -75,7 +75,7 @@ class TestCSVExtractor: assert docs[0].page_content == "id: source-1;body: hello" assert attempted_encodings == [None, "bad", "utf-8"] - def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch): + def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) def always_raise(*args, **kwargs): @@ -86,7 +86,7 @@ class TestCSVExtractor: assert extractor.extract() == [] - def test_read_from_file_re_raises_csv_error(self, monkeypatch): + def test_read_from_file_re_raises_csv_error(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv") monkeypatch.setattr(pd, 
"read_csv", lambda *args, **kwargs: (_ for _ in ()).throw(csv.Error("bad csv"))) diff --git a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py index d2bcc1e2c4..2b42adc716 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py @@ -45,7 +45,7 @@ class _FakeWorkbook: class TestExcelExtractor: - def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch): + def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch: pytest.MonkeyPatch): sheet_with_data = _FakeSheet( header_rows=[("Name", "Link")], data_rows=[ @@ -68,7 +68,7 @@ class TestExcelExtractor: assert docs[1].page_content == '"Name":"";"Link":"123"' assert all(doc.metadata["source"] == "/tmp/sample.xlsx" for doc in docs) - def test_extract_xls_path(self, monkeypatch): + def test_extract_xls_path(self, monkeypatch: pytest.MonkeyPatch): class FakeExcelFile: sheet_names = ["Sheet1"] diff --git a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py index 5beed88971..b4b08f57ec 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py @@ -56,7 +56,7 @@ def _patch_all_extractors(monkeypatch) -> _ExtractorFactory: class TestExtractProcessorLoaders: - def test_load_from_upload_file_return_docs_and_text(self, monkeypatch): + def test_load_from_upload_file_return_docs_and_text(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: SimpleNamespace(**kwargs)) monkeypatch.setattr( @@ -93,7 +93,9 @@ class TestExtractProcessorLoaders: ), ], ) - def test_load_from_url_builds_temp_file_with_correct_suffix(self, monkeypatch, url, headers, expected_suffix): + def 
test_load_from_url_builds_temp_file_with_correct_suffix( + self, monkeypatch: pytest.MonkeyPatch, url, headers, expected_suffix + ): response = SimpleNamespace(headers=headers, content=b"body") monkeypatch.setattr(processor_module.ssrf_proxy, "get", lambda *args, **kwargs: response) monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: SimpleNamespace(**kwargs)) @@ -119,11 +121,13 @@ class TestExtractProcessorLoaders: class TestExtractProcessorFileRouting: @pytest.fixture(autouse=True) - def _set_unstructured_config(self, monkeypatch): + def _set_unstructured_config(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_URL", "https://unstructured") monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_KEY", "key") - def _run_extract_for_extension(self, monkeypatch, extension: str, etl_type: str, is_automatic: bool = False): + def _run_extract_for_extension( + self, monkeypatch: pytest.MonkeyPatch, extension: str, etl_type: str, is_automatic: bool = False + ): factory = _patch_all_extractors(monkeypatch) monkeypatch.setattr(processor_module.dify_config, "ETL_TYPE", etl_type) @@ -167,7 +171,7 @@ class TestExtractProcessorFileRouting: ], ) def test_extract_routes_file_extensions_for_unstructured_mode( - self, monkeypatch, extension, expected_extractor, is_automatic + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor, is_automatic ): extractor_name, args, kwargs = self._run_extract_for_extension( monkeypatch, extension, etl_type="Unstructured", is_automatic=is_automatic @@ -189,7 +193,9 @@ class TestExtractProcessorFileRouting: (".txt", "TextExtractor"), ], ) - def test_extract_routes_file_extensions_for_default_mode(self, monkeypatch, extension, expected_extractor): + def test_extract_routes_file_extensions_for_default_mode( + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor + ): extractor_name, _, _ = 
self._run_extract_for_extension(monkeypatch, extension, etl_type="SelfHosted") assert extractor_name == expected_extractor @@ -202,7 +208,7 @@ class TestExtractProcessorFileRouting: class TestExtractProcessorDatasourceRouting: - def test_extract_routes_notion_datasource(self, monkeypatch): + def test_extract_routes_notion_datasource(self, monkeypatch: pytest.MonkeyPatch): factory = _patch_all_extractors(monkeypatch) notion_info = SimpleNamespace( @@ -228,7 +234,9 @@ class TestExtractProcessorDatasourceRouting: ("jinareader", "JinaReaderWebExtractor"), ], ) - def test_extract_routes_website_datasource_providers(self, monkeypatch, provider: str, expected: str): + def test_extract_routes_website_datasource_providers( + self, monkeypatch: pytest.MonkeyPatch, provider: str, expected: str + ): factory = _patch_all_extractors(monkeypatch) website_info = SimpleNamespace( diff --git a/api/tests/unit_tests/core/rag/extractor/test_helpers.py b/api/tests/unit_tests/core/rag/extractor/test_helpers.py index 74387f749d..1c6f97ec53 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_helpers.py +++ b/api/tests/unit_tests/core/rag/extractor/test_helpers.py @@ -21,7 +21,7 @@ class TestHelpers: # Assert the language field for full coverage assert encodings[0].language is not None - def test_detect_file_encodings_timeout(self, monkeypatch): + def test_detect_file_encodings_timeout(self, monkeypatch: pytest.MonkeyPatch): class FakeFuture: def result(self, timeout=None): raise helpers.concurrent.futures.TimeoutError() @@ -41,7 +41,7 @@ class TestHelpers: with pytest.raises(TimeoutError, match="Timeout reached while detecting encoding"): detect_file_encodings("file.txt", timeout=1) - def test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch): + def test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch: pytest.MonkeyPatch): class FakeResult: encoding = None coherence = 0.0 diff --git 
a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py index 7e78c86c7d..8ede44ec04 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py @@ -74,7 +74,7 @@ after assert "[link]" not in tups[1][1] assert "img.png" not in tups[1][1] - def test_parse_tups_autodetects_encoding_after_decode_error(self, monkeypatch): + def test_parse_tups_autodetects_encoding_after_decode_error(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=True) calls: list[str | None] = [] @@ -99,7 +99,7 @@ after assert len(tups) == 2 assert calls == [None, "bad-encoding", "utf-8"] - def test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch): + def test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=False) def raise_decode(self, encoding=None): @@ -110,7 +110,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch): + def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path") def raise_other(self, encoding=None): @@ -121,7 +121,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch): + def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path") monkeypatch.setattr(extractor, "parse_tups", lambda _: [(None, "plain"), ("Header", "value")]) diff --git 
a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py index 808e41867e..49f7b592dc 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py @@ -28,7 +28,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "token" - def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, monkeypatch): + def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -46,7 +46,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "env-token" - def test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch): + def test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -63,7 +63,7 @@ class TestNotionExtractorInitAndPublicMethods: credential_id="cred", ) - def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch): + def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -83,7 +83,7 @@ class TestNotionExtractorInitAndPublicMethods: load_mock.assert_called_once_with("obj", "page") assert len(docs) == 1 - def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch): + def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -394,7 +394,7 @@ class TestNotionMetadataAndCredentialMethods: assert extractor.update_last_edited_time(None) is None - def 
test_update_last_edited_time_updates_document_and_commits(self, monkeypatch): + def test_update_last_edited_time_updates_document_and_commits(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -479,7 +479,7 @@ class TestNotionMetadataAndCredentialMethods: with pytest.raises(AssertionError, match="Notion access token is required"): extractor.get_notion_last_edited_time() - def test_get_access_token_success_and_errors(self, monkeypatch): + def test_get_access_token_success_and_errors(self, monkeypatch: pytest.MonkeyPatch): with pytest.raises(Exception, match="No credential id found"): notion_extractor.NotionExtractor._get_access_token("tenant", None) diff --git a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py index 47222a23a2..f2caf02d5e 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py @@ -7,7 +7,7 @@ import core.rag.extractor.pdf_extractor as pe @pytest.fixture -def mock_dependencies(monkeypatch): +def mock_dependencies(monkeypatch: pytest.MonkeyPatch): # Mock storage saves = [] @@ -61,7 +61,9 @@ def mock_dependencies(monkeypatch): (b"\x89PNG\r\n\x1a\n some png", "image/png", "png", "test_file_id_png"), ], ) -def test_extract_images_formats(mock_dependencies, monkeypatch, image_bytes, expected_mime, expected_ext, file_id): +def test_extract_images_formats( + mock_dependencies, monkeypatch: pytest.MonkeyPatch, image_bytes, expected_mime, expected_ext, file_id +): saves = mock_dependencies.saves db_stub = mock_dependencies.db @@ -122,7 +124,7 @@ def test_extract_images_get_objects_scenarios(mock_dependencies, get_objects_sid assert result == "" -def test_extract_calls_extract_images(mock_dependencies, monkeypatch): +def test_extract_calls_extract_images(mock_dependencies, monkeypatch: pytest.MonkeyPatch): # Mock 
pypdfium2 mock_pdf_doc = MagicMock() mock_page = MagicMock() diff --git a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py index fb3c6e52c6..71046d73af 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py @@ -19,7 +19,7 @@ class TestTextExtractor: assert docs[0].page_content == "hello world" assert docs[0].metadata == {"source": str(file_path)} - def test_extract_autodetect_success_after_decode_error(self, monkeypatch): + def test_extract_autodetect_success_after_decode_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) calls = [] @@ -44,7 +44,7 @@ class TestTextExtractor: assert docs[0].page_content == "decoded text" assert calls == [None, "bad", "utf-8"] - def test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch): + def test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) def always_decode_error(self, encoding=None): @@ -56,7 +56,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="all detected encodings failed"): extractor.extract() - def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch): + def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=False) def always_decode_error(self, encoding=None): @@ -67,7 +67,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="specified encoding failed"): extractor.extract() - def test_extract_wraps_non_decode_exceptions(self, monkeypatch): + def test_extract_wraps_non_decode_exceptions(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt") def raise_other(self, 
encoding=None): diff --git a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py index b9f2449cfb..513d232d7f 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py @@ -61,7 +61,7 @@ def test_parse_row(): assert extractor._parse_row(row, {}, 3) == gt[idx] -def test_init_downloads_via_ssrf_proxy(monkeypatch): +def test_init_downloads_via_ssrf_proxy(monkeypatch: pytest.MonkeyPatch): doc = Document() doc.add_paragraph("hello") buf = io.BytesIO() @@ -97,7 +97,7 @@ def test_init_downloads_via_ssrf_proxy(monkeypatch): extractor.temp_file.close() -def test_extract_images_from_docx(monkeypatch): +def test_extract_images_from_docx(monkeypatch: pytest.MonkeyPatch): external_bytes = b"ext-bytes" internal_bytes = b"int-bytes" @@ -210,7 +210,7 @@ def test_extract_images_from_docx_uses_internal_files_url(): dify_config.INTERNAL_FILES_URL = original_internal_files_url -def test_extract_hyperlinks(monkeypatch): +def test_extract_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage to avoid issues during image extraction (even if no images are present) monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -255,7 +255,7 @@ def test_extract_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_extract_legacy_hyperlinks(monkeypatch): +def test_extract_legacy_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -317,7 +317,7 @@ def test_extract_legacy_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_init_rejects_invalid_url_status(monkeypatch): +def test_init_rejects_invalid_url_status(monkeypatch: 
pytest.MonkeyPatch): class FakeResponse: status_code = 404 content = b"" @@ -392,7 +392,7 @@ def test_close_closes_awaitable_close_result(): extractor.temp_file.close.assert_called_once() -def test_extract_images_handles_invalid_external_cases(monkeypatch): +def test_extract_images_handles_invalid_external_cases(monkeypatch: pytest.MonkeyPatch): class FakeTargetRef: def __contains__(self, item): return item == "image" @@ -437,7 +437,7 @@ def test_extract_images_handles_invalid_external_cases(monkeypatch): db_stub.session.commit.assert_called_once() -def test_table_to_markdown_and_parse_helpers(monkeypatch): +def test_table_to_markdown_and_parse_helpers(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) table = SimpleNamespace( @@ -500,7 +500,7 @@ def test_table_to_markdown_and_parse_helpers(monkeypatch): assert extractor._parse_cell(cell, image_map) == "EXT-IMGINT-IMGplain" -def test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch): +def test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) ext_image_id = "ext-image" diff --git a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py index 26ce333e11..19fb385a6d 100644 --- a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py +++ b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py @@ -45,7 +45,7 @@ def _install_chunk_by_title(monkeypatch: pytest.MonkeyPatch, chunks: list[Simple class TestUnstructuredMarkdownMsgXml: - def test_markdown_extractor_without_api(self, monkeypatch): + def test_markdown_extractor_without_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" chunk-1 "), SimpleNamespace(text=" chunk-2 ")]) _register_module( 
monkeypatch, "unstructured.partition.md", partition_md=lambda filename: [SimpleNamespace(text="x")] @@ -55,7 +55,7 @@ class TestUnstructuredMarkdownMsgXml: assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_markdown_extractor_with_api(self, monkeypatch): + def test_markdown_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" via-api ")]) calls = {} @@ -70,7 +70,7 @@ class TestUnstructuredMarkdownMsgXml: assert docs[0].page_content == "via-api" assert calls == {"filename": "/tmp/file.md", "api_url": "https://u", "api_key": "k"} - def test_msg_extractor_local(self, monkeypatch): + def test_msg_extractor_local(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) _register_module( monkeypatch, "unstructured.partition.msg", partition_msg=lambda filename: [SimpleNamespace(text="x")] @@ -78,7 +78,7 @@ class TestUnstructuredMarkdownMsgXml: assert UnstructuredMsgExtractor("/tmp/file.msg").extract()[0].page_content == "msg-doc" - def test_msg_extractor_with_api(self, monkeypatch): + def test_msg_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) calls = {} @@ -94,7 +94,7 @@ class TestUnstructuredMarkdownMsgXml: ) assert calls["filename"] == "/tmp/file.msg" - def test_xml_extractor_local_and_api(self, monkeypatch): + def test_xml_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="xml-doc")]) xml_calls = {} @@ -124,7 +124,7 @@ class TestUnstructuredMarkdownMsgXml: class TestUnstructuredEmailAndEpub: - def test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch): + def test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) captured = {} @@ -150,7 
+150,7 @@ class TestUnstructuredEmailAndEpub: assert "Hello Email" in chunk_elements[0].text assert chunk_elements[1].text == bad_base64 - def test_email_extractor_with_api(self, monkeypatch): + def test_email_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="api-email")]) _register_module( monkeypatch, @@ -162,7 +162,7 @@ class TestUnstructuredEmailAndEpub: assert docs[0].page_content == "api-email" - def test_epub_extractor_local_and_api(self, monkeypatch): + def test_epub_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="epub-doc")]) calls = {"download": 0, "partition": 0} @@ -198,7 +198,7 @@ class TestUnstructuredPPTAndPPTX: with pytest.raises(NotImplementedError, match="Unstructured API Url is not configured"): UnstructuredPPTExtractor("/tmp/file.ppt").extract() - def test_ppt_extractor_groups_text_by_page(self, monkeypatch): + def test_ppt_extractor_groups_text_by_page(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -215,7 +215,7 @@ class TestUnstructuredPPTAndPPTX: assert [doc.page_content for doc in docs] == ["A\nB", "C"] - def test_pptx_extractor_local_and_api(self, monkeypatch): + def test_pptx_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -244,7 +244,7 @@ class TestUnstructuredPPTAndPPTX: class TestUnstructuredWord: - def _install_doc_modules(self, monkeypatch, version: str, filetype_value): + def _install_doc_modules(self, monkeypatch: pytest.MonkeyPatch, version: str, filetype_value): _register_unstructured_packages(monkeypatch) class FileType: @@ -276,13 +276,13 @@ class TestUnstructuredWord: ], ) - def test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch): + def 
test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="doc") with pytest.raises(ValueError, match="Partitioning .doc files is only supported"): UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() - def test_word_extractor_doc_and_docx_paths(self, monkeypatch): + def test_word_extractor_doc_and_docx_paths(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.11", filetype_value="doc") docs = UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() @@ -292,7 +292,7 @@ class TestUnstructuredWord: docs = UnstructuredWordExtractor("/tmp/file.docx", "https://u", "k").extract() assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch): + def test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="not-used") monkeypatch.setitem(sys.modules, "magic", None) diff --git a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py index d758be218a..95878fc688 100644 --- a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py +++ b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py @@ -59,7 +59,7 @@ class TestWaterCrawlExceptions: class TestBaseAPIClient: - def test_init_session_builds_expected_headers(self, monkeypatch): + def test_init_session_builds_expected_headers(self, monkeypatch: pytest.MonkeyPatch): captured = {} def fake_client(**kwargs): @@ -74,7 +74,7 @@ class TestBaseAPIClient: assert captured["headers"]["X-API-Key"] == "k" assert captured["headers"]["User-Agent"] == "WaterCrawl-Plugin" - def test_request_stream_and_non_stream_paths(self, monkeypatch): + 
def test_request_stream_and_non_stream_paths(self, monkeypatch: pytest.MonkeyPatch): class FakeSession: def __init__(self): self.request_calls = [] @@ -106,7 +106,7 @@ class TestBaseAPIClient: assert fake_session.build_calls assert fake_session.send_calls[0][1] is True - def test_http_method_helpers_delegate_to_request(self, monkeypatch): + def test_http_method_helpers_delegate_to_request(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(BaseAPIClient, "init_session", lambda self: MagicMock()) client = BaseAPIClient(api_key="k", base_url="https://watercrawl.dev") @@ -127,7 +127,7 @@ class TestBaseAPIClient: class TestWaterCrawlAPIClient: - def test_process_eventstream_and_download(self, monkeypatch): + def test_process_eventstream_and_download(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = MagicMock() @@ -174,7 +174,7 @@ class TestWaterCrawlAPIClient: client.process_response(_response(200, content_type="application/octet-stream", content=b"bin")) == b"bin" ) - def test_process_response_event_stream_returns_generator(self, monkeypatch): + def test_process_response_event_stream_returns_generator(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") generator = (item for item in [{"type": "result", "data": {}}]) monkeypatch.setattr(client, "process_eventstream", lambda response, download=False: generator) @@ -193,7 +193,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(RuntimeError, match="http error"): client.process_response(response) - def test_endpoint_wrappers(self, monkeypatch): + def test_endpoint_wrappers(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "process_response", lambda resp: "processed") @@ -208,7 +208,7 @@ class TestWaterCrawlAPIClient: assert client.download_crawl_request("id") == "processed" assert client.get_crawl_request_results("id") == "processed" - def 
test_monitor_crawl_request_generator_and_validation(self, monkeypatch): + def test_monitor_crawl_request_generator_and_validation(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "process_response", lambda _: (x for x in [{"type": "result", "data": 1}])) @@ -221,7 +221,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(ValueError, match="Generator expected"): list(client.monitor_crawl_request("job-1")) - def test_scrape_url_sync_and_async(self, monkeypatch): + def test_scrape_url_sync_and_async(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "create_crawl_request", lambda **kwargs: {"uuid": "job-1"}) @@ -238,7 +238,7 @@ class TestWaterCrawlAPIClient: sync_result = client.scrape_url("https://example.com", sync=True) assert sync_result == {"url": "https://example.com"} - def test_download_result_fetches_json_and_closes(self, monkeypatch): + def test_download_result_fetches_json_and_closes(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = _response(200, {"markdown": "body"}) @@ -251,7 +251,7 @@ class TestWaterCrawlAPIClient: class TestWaterCrawlProvider: - def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch): + def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") captured_kwargs = {} @@ -290,7 +290,7 @@ class TestWaterCrawlProvider: assert captured_kwargs["page_options"]["only_main_content"] is False assert captured_kwargs["page_options"]["wait_time"] == 1000 - def test_get_crawl_status_active_and_completed(self, monkeypatch): + def test_get_crawl_status_active_and_completed(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( @@ -327,7 +327,7 @@ class TestWaterCrawlProvider: assert completed["status"] == "completed" assert completed["data"] == [{"url": 
"u"}] - def test_get_crawl_url_data_and_scrape(self, monkeypatch): + def test_get_crawl_url_data_and_scrape(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr(provider, "scrape_url", lambda url: {"source_url": url}) @@ -339,7 +339,7 @@ class TestWaterCrawlProvider: monkeypatch.setattr(provider, "_get_results", lambda job_id, query_params=None: iter([])) assert provider.get_crawl_url_data("job", "u1") is None - def test_structure_data_validation_and_get_results_pagination(self, monkeypatch): + def test_structure_data_validation_and_get_results_pagination(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") with pytest.raises(ValueError, match="Invalid result object"): @@ -380,7 +380,7 @@ class TestWaterCrawlProvider: assert len(results) == 1 assert results[0]["source_url"] == "https://a" - def test_scrape_url_uses_client_and_structure(self, monkeypatch): + def test_scrape_url_uses_client_and_structure(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( provider.client, "scrape_url", lambda **kwargs: {"result": {"metadata": {}, "markdown": "m"}, "url": "u"} @@ -392,7 +392,7 @@ class TestWaterCrawlProvider: class TestWaterCrawlWebExtractor: - def test_extract_crawl_and_scrape_modes(self, monkeypatch): + def test_extract_crawl_and_scrape_modes(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: { @@ -418,7 +418,7 @@ class TestWaterCrawlWebExtractor: assert crawl_extractor.extract()[0].page_content == "crawl" assert scrape_extractor.extract()[0].page_content == "scrape" - def test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch): + def test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( 
"core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: None, diff --git a/api/tests/unit_tests/core/telemetry/test_facade.py b/api/tests/unit_tests/core/telemetry/test_facade.py index 36e8e1bbb1..95d653f55b 100644 --- a/api/tests/unit_tests/core/telemetry/test_facade.py +++ b/api/tests/unit_tests/core/telemetry/test_facade.py @@ -14,7 +14,7 @@ from core.telemetry.events import TelemetryContext, TelemetryEvent @pytest.fixture -def telemetry_test_setup(monkeypatch): +def telemetry_test_setup(monkeypatch: pytest.MonkeyPatch): module_name = "core.ops.ops_trace_manager" ops_stub = types.ModuleType(module_name) diff --git a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py index ad6d5906ae..b21a5c3e24 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py @@ -78,7 +78,7 @@ def _tool_yaml() -> dict[str, Any]: } -def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch): +def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch: pytest.MonkeyPatch): yaml_payloads = [_provider_yaml(), _tool_yaml()] def _load_yaml(*args, **kwargs): diff --git a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py index c7829fc0d7..3f6b1ec154 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py @@ -115,7 +115,7 @@ def test_weekday_tool(): list(weekday_tool.invoke(user_id="u", tool_parameters={"year": 2024, "day": 1})) -def test_simple_code_valid_execution(monkeypatch): +def test_simple_code_valid_execution(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -138,7 +138,7 @@ def test_simple_code_invalid_language(): 
list(simple_code.invoke(user_id="u", tool_parameters={"language": "go", "code": "fmt.Println(1)"})) -def test_simple_code_execution_error(monkeypatch): +def test_simple_code_execution_error(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -155,14 +155,14 @@ def test_webscraper_empty_url(): assert empty == "Please input url" -def test_webscraper_fetch(monkeypatch): +def test_webscraper_fetch(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: "page") full = list(webscraper.invoke(user_id="u", tool_parameters={"url": "https://example.com"}))[0].message.text assert full == "page" -def test_webscraper_summary(monkeypatch): +def test_webscraper_summary(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: "page") monkeypatch.setattr(webscraper, "summary", lambda user_id, content: "summary") @@ -175,7 +175,7 @@ def test_webscraper_summary(monkeypatch): assert summarized == "summary" -def test_webscraper_fetch_error(monkeypatch): +def test_webscraper_fetch_error(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr( "core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", @@ -192,7 +192,7 @@ def test_asr_invalid_file(): assert "not a valid audio file" in invalid_file -def test_asr_valid_file_invocation(monkeypatch): +def test_asr_valid_file_invocation(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) model_instance = type("M", (), {"invoke_speech2text": lambda self, file: "transcript"})() model_manager = type("Mgr", (), {"get_model_instance": lambda *a, **k: model_instance})() @@ -209,7 +209,7 @@ def test_asr_valid_file_invocation(monkeypatch): assert 
captured_manager_kwargs == {"tenant_id": "tenant-1", "user_id": "u"} -def test_asr_available_models_and_runtime_parameters(monkeypatch): +def test_asr_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) provider_model = type("PM", (), {"provider": "p", "models": [type("Model", (), {"model": "m"})()]})() monkeypatch.setattr( @@ -220,7 +220,7 @@ def test_asr_available_models_and_runtime_parameters(monkeypatch): assert asr.get_runtime_parameters()[0].name == "model" -def test_tts_invoke_returns_messages(monkeypatch): +def test_tts_invoke_returns_messages(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) captured_manager_kwargs = {} voices_model_instance = type( @@ -280,7 +280,7 @@ def test_tts_tool_raises_when_voice_unavailable(monkeypatch, voices): list(tts.invoke(user_id="u", tool_parameters={"model": "p#m", "text": "hello"})) -def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): +def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) model_1 = SimpleNamespace( @@ -307,7 +307,7 @@ def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): assert runtime_parameters[1].name == "voice#provider-a#model-a" -def test_provider_classes_and_builtin_sort(monkeypatch): +def test_provider_classes_and_builtin_sort(monkeypatch: pytest.MonkeyPatch): # Use object.__new__ to avoid YAML-loading __init__; only pass-through validation is exercised. # Ensure pass-through _validate_credentials methods are executed. 
AudioToolProvider._validate_credentials(object.__new__(AudioToolProvider), "u", {}) diff --git a/api/tests/unit_tests/core/tools/test_custom_tool.py b/api/tests/unit_tests/core/tools/test_custom_tool.py index f35546b025..f525baeaf2 100644 --- a/api/tests/unit_tests/core/tools/test_custom_tool.py +++ b/api/tests/unit_tests/core/tools/test_custom_tool.py @@ -47,7 +47,7 @@ def test_parsed_response_to_string(): assert ParsedResponse("ok", False).to_string() == "ok" -def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch): +def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch: pytest.MonkeyPatch): tool = _build_tool() forked = tool.fork_tool_runtime(ToolRuntime(tenant_id="tenant-2")) assert isinstance(forked, ApiTool) @@ -184,7 +184,7 @@ def test_get_parameter_value_and_type_conversion_helpers(): assert tool._convert_body_property_type({"anyOf": [{"type": "integer"}]}, "2") == 2 -def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch): +def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [ {"name": "id", "in": "path", "required": True, "schema": {"type": "string"}}, @@ -236,7 +236,7 @@ def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch invalid_method_tool.do_http_request("https://api.example.com", "TRACE", headers={}, parameters={}) -def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch): +def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [], "requestBody": { diff --git a/api/tests/unit_tests/core/tools/test_tool_manager.py b/api/tests/unit_tests/core/tools/test_tool_manager.py index c9b3dfb186..7c7d6eec2d 100644 --- a/api/tests/unit_tests/core/tools/test_tool_manager.py +++ b/api/tests/unit_tests/core/tools/test_tool_manager.py @@ -648,7 +648,7 @@ def test_list_default_builtin_providers_for_postgres_and_mysql(): assert 
providers == provider_records -def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch): +def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch: pytest.MonkeyPatch): hardcoded_controller = SimpleNamespace(entity=SimpleNamespace(identity=SimpleNamespace(name="hardcoded"))) plugin_controller = object.__new__(PluginToolProviderController) plugin_controller.entity = SimpleNamespace(identity=SimpleNamespace(name="plugin-provider")) diff --git a/api/tests/unit_tests/core/tools/utils/test_configuration.py b/api/tests/unit_tests/core/tools/utils/test_configuration.py index ae5638784c..9e179536de 100644 --- a/api/tests/unit_tests/core/tools/utils/test_configuration.py +++ b/api/tests/unit_tests/core/tools/utils/test_configuration.py @@ -4,6 +4,8 @@ from collections.abc import Generator from typing import Any from unittest.mock import patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom from core.helper.tool_parameter_cache import ToolParameterCache from core.tools.__base.tool import Tool @@ -110,7 +112,7 @@ def test_encrypt_tool_parameters(): assert encrypted["plain"] == "x" -def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch): +def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( @@ -139,7 +141,7 @@ def test_delete_tool_parameters_cache(): mock_delete.assert_called_once() -def test_configuration_manager_decrypt_suppresses_errors(monkeypatch): +def test_configuration_manager_decrypt_suppresses_errors(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( patch.object(ToolParameterCache, "get", return_value=None), diff --git a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py index 5f34135af4..354b395504 100644 --- a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py +++ 
b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py @@ -42,7 +42,7 @@ class _FakeToolFileManager: @pytest.fixture(autouse=True) -def _patch_tool_file_manager(monkeypatch): +def _patch_tool_file_manager(monkeypatch: pytest.MonkeyPatch): # Patch the manager used inside the transformer module monkeypatch.setattr(mt, "ToolFileManager", _FakeToolFileManager) # also ensure predictable URL generation (no need to patch; uses id and extension only) diff --git a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py index 6bb86ebe78..081b189745 100644 --- a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py +++ b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py @@ -34,7 +34,7 @@ def test_system_encrypter_raises_error_for_invalid_ciphertext(): encrypter.decrypt_params("not-base64") -def test_system_helpers_use_global_cached_instance(monkeypatch): +def test_system_helpers_use_global_cached_instance(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(encryption, "_encrypter", None) monkeypatch.setattr("core.tools.utils.system_encryption.dify_config.SECRET_KEY", "global-secret") diff --git a/api/tests/unit_tests/core/variables/test_segment_type.py b/api/tests/unit_tests/core/variables/test_segment_type.py index d4e862220a..baa2ac2dc7 100644 --- a/api/tests/unit_tests/core/variables/test_segment_type.py +++ b/api/tests/unit_tests/core/variables/test_segment_type.py @@ -233,7 +233,7 @@ class TestSegmentTypeAdditionalMethods: assert SegmentType.GROUP.is_valid([StringSegment(value="b")]) is True assert SegmentType.GROUP.is_valid(["not-segment"]) is False - def test_unreachable_assertion_branch(self, monkeypatch): + def test_unreachable_assertion_branch(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(SegmentType, "is_array_type", lambda self: False) with pytest.raises(AssertionError, match="unreachable"): diff --git 
a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py index d7ef781732..a18a36a099 100644 --- a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GraphParams: call_depth = 0 -def test_datasource_node_delegates_to_manager_stream(mocker): +def test_datasource_node_delegates_to_manager_stream(mocker: MockerFixture): # prepare sys variables sys_vars = { "sys": { diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py index f254fc3d09..89433b34e6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.rag.index_processor.constant.index_type import IndexTechniqueType @@ -50,7 +51,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_index_processor(mocker): +def mock_index_processor(mocker: MockerFixture): """Create mock IndexProcessorProtocol.""" mock_processor = Mock(spec=IndexProcessorProtocol) mocker.patch( @@ -61,7 +62,7 @@ def mock_index_processor(mocker): @pytest.fixture -def mock_summary_index_service(mocker): +def mock_summary_index_service(mocker: MockerFixture): """Create mock 
SummaryIndexServiceProtocol.""" mock_service = Mock(spec=SummaryIndexServiceProtocol) mocker.patch( diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py index e923ee761b..d77a2ce363 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.workflow.nodes.knowledge_retrieval.entities import ( @@ -56,7 +57,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_rag_retrieval(mocker): +def mock_rag_retrieval(mocker: MockerFixture): """Create mock RAGRetrievalProtocol.""" mock_retrieval = Mock(spec=RAGRetrievalProtocol) mock_retrieval.knowledge_retrieval.return_value = [] diff --git a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py index c707cf28cd..c09f2d3fb6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py @@ -222,7 +222,7 @@ def llm_node( @pytest.fixture -def model_config(monkeypatch): +def model_config(monkeypatch: pytest.MonkeyPatch): from tests.integration_tests.model_runtime.__mock.plugin_model import MockModelClass def mock_model_providers(_self): @@ -1276,7 +1276,7 @@ class TestSaveMultimodalOutputAndConvertResultToMarkdown: mock_file_saver.save_binary_string.assert_not_called() mock_file_saver.save_remote_url.assert_not_called() - def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch): + def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch: 
pytest.MonkeyPatch): llm_node, mock_file_saver = llm_node_for_multimodal image_raw_data = b"PNG_DATA" diff --git a/api/tests/unit_tests/core/workflow/test_node_factory.py b/api/tests/unit_tests/core/workflow/test_node_factory.py index 1821f72e0c..e93a7c7ccd 100644 --- a/api/tests/unit_tests/core/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/workflow/test_node_factory.py @@ -88,7 +88,7 @@ class TestFetchMemory: assert result is None - def test_returns_none_when_conversation_does_not_exist(self, monkeypatch): + def test_returns_none_when_conversation_does_not_exist(self, monkeypatch: pytest.MonkeyPatch): class FakeSelect: def where(self, *_args): return self @@ -119,7 +119,7 @@ class TestFetchMemory: assert result is None - def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch): + def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch: pytest.MonkeyPatch): conversation = sentinel.conversation memory = sentinel.memory @@ -189,7 +189,7 @@ class TestDifyGraphInitContext: class TestDefaultWorkflowCodeExecutor: - def test_execute_delegates_to_code_executor(self, monkeypatch): + def test_execute_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): executor = node_factory.DefaultWorkflowCodeExecutor() execute_workflow_code_template = MagicMock(return_value={"answer": "ok"}) monkeypatch.setattr( @@ -219,7 +219,7 @@ class TestDefaultWorkflowCodeExecutor: class TestCodeExecutorJinja2TemplateRenderer: - def test_render_template_delegates_to_code_executor(self, monkeypatch): + def test_render_template_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): renderer = workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() execute_workflow_code_template = MagicMock(return_value={"result": "Hello workflow"}) monkeypatch.setattr( @@ -237,7 +237,7 @@ class TestCodeExecutorJinja2TemplateRenderer: inputs={"name": "workflow"}, ) - def 
test_render_template_wraps_code_execution_errors(self, monkeypatch): + def test_render_template_wraps_code_execution_errors(self, monkeypatch: pytest.MonkeyPatch): renderer = workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() monkeypatch.setattr( workflow_template_rendering.CodeExecutor, @@ -434,7 +434,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: missing"): factory.create_node({"id": "node-id", "data": {"type": "missing"}}) - def test_rejects_missing_class_mapping(self, monkeypatch, factory): + def test_rejects_missing_class_mapping(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ -444,7 +444,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_rejects_missing_latest_class(self, monkeypatch, factory): + def test_rejects_missing_latest_class(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ -454,7 +454,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No latest version class found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_uses_version_specific_class_when_available(self, monkeypatch, factory): + def test_uses_version_specific_class_when_available(self, monkeypatch: pytest.MonkeyPatch, factory): matched_node = sentinel.matched_node latest_node_class = _node_constructor(return_value=sentinel.latest_node) matched_node_class = _node_constructor(return_value=matched_node) @@ -475,7 +475,9 @@ class TestDifyNodeFactoryCreateNode: assert kwargs["graph_runtime_state"] is factory.graph_runtime_state latest_node_class.assert_not_called() - def 
test_falls_back_to_latest_class_when_version_specific_mapping_is_missing(self, monkeypatch, factory): + def test_falls_back_to_latest_class_when_version_specific_mapping_is_missing( + self, monkeypatch: pytest.MonkeyPatch, factory + ): latest_node = sentinel.latest_node latest_node_class = _node_constructor(return_value=latest_node) monkeypatch.setattr( @@ -507,7 +509,7 @@ class TestDifyNodeFactoryCreateNode: (BuiltinNodeTypes.DOCUMENT_EXTRACTOR, "DocumentExtractorNode"), ], ) - def test_creates_specialized_nodes(self, monkeypatch, factory, node_type, constructor_name): + def test_creates_specialized_nodes(self, monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name): created_node = object() constructor = _node_constructor(return_value=created_node) constructor._mock_name = constructor_name @@ -597,7 +599,9 @@ class TestDifyNodeFactoryCreateNode: prepared_llm.assert_called_once_with(sentinel.model_instance) assert kwargs["model_instance"] is wrapped_model_instance - def test_create_node_passes_alias_preserving_llm_config_to_constructor(self, monkeypatch, factory): + def test_create_node_passes_alias_preserving_llm_config_to_constructor( + self, monkeypatch: pytest.MonkeyPatch, factory + ): created_node = object() constructor = _node_constructor(return_value=created_node) monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=constructor)) @@ -665,7 +669,7 @@ class TestDifyNodeFactoryCreateNode: ) def test_creates_model_backed_nodes( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name, @@ -726,7 +730,7 @@ class TestDifyNodeFactoryModelInstance: factory._llm_model_factory = sentinel.model_factory return factory - def test_delegates_to_fetch_model_config(self, monkeypatch, factory): + def test_delegates_to_fetch_model_config(self, monkeypatch: pytest.MonkeyPatch, factory): node_data_model = SimpleNamespace( provider="provider", name="model", @@ -755,7 +759,7 @@ class 
TestDifyNodeFactoryModelInstance: model_factory=sentinel.model_factory, ) - def test_propagates_fetch_model_config_errors(self, monkeypatch, factory): + def test_propagates_fetch_model_config_errors(self, monkeypatch: pytest.MonkeyPatch, factory): fetch_model_config = MagicMock(side_effect=ValueError("broken model config")) monkeypatch.setattr(node_factory, "fetch_model_config", fetch_model_config) @@ -780,7 +784,7 @@ class TestDifyNodeFactoryMemory: assert result is None factory.graph_runtime_state.variable_pool.get.assert_not_called() - def test_uses_string_segment_conversation_id(self, monkeypatch, factory): + def test_uses_string_segment_conversation_id(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = StringSegment(value="conversation-id") fetch_memory = MagicMock(return_value=sentinel.memory) @@ -800,7 +804,7 @@ class TestDifyNodeFactoryMemory: model_instance=sentinel.model_instance, ) - def test_ignores_non_string_segment_conversation_ids(self, monkeypatch, factory): + def test_ignores_non_string_segment_conversation_ids(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = sentinel.segment fetch_memory = MagicMock(return_value=sentinel.memory) diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry.py b/api/tests/unit_tests/core/workflow/test_workflow_entry.py index 041c5cc612..2e9e3468fd 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry.py @@ -19,7 +19,7 @@ from graphon.variables.variables import StringVariable @pytest.fixture(autouse=True) -def _mock_ssrf_head(monkeypatch): +def _mock_ssrf_head(monkeypatch: pytest.MonkeyPatch): """Avoid any real network requests during tests. 
factories.file_factory.remote.get_remote_file_info() uses ssrf_proxy.head diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py index 270d0bf90d..3978cbb1a0 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py @@ -603,7 +603,7 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_rejects_missing_node_class(self, monkeypatch): + def test_run_free_node_rejects_missing_node_class(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( workflow_entry, "resolve_workflow_node_class", @@ -619,7 +619,9 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented(self, monkeypatch): + def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented( + self, monkeypatch: pytest.MonkeyPatch + ): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): @@ -707,7 +709,7 @@ class TestWorkflowEntryHelpers: tenant_id="tenant-id", ) - def test_run_free_node_wraps_execution_failures(self, monkeypatch): + def test_run_free_node_wraps_execution_failures(self, monkeypatch: pytest.MonkeyPatch): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): diff --git a/api/tests/unit_tests/extensions/test_ext_request_logging.py b/api/tests/unit_tests/extensions/test_ext_request_logging.py index dcb457c806..03479686bb 100644 --- a/api/tests/unit_tests/extensions/test_ext_request_logging.py +++ b/api/tests/unit_tests/extensions/test_ext_request_logging.py @@ -71,7 +71,7 @@ def enable_request_logging(monkeypatch: pytest.MonkeyPatch): class TestRequestLoggingExtension: def test_receiver_should_not_be_invoked_if_configuration_is_disabled( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, 
mock_request_receiver, mock_response_receiver, ): @@ -266,7 +266,9 @@ class TestResponseUnmodified: class TestRequestFinishedInfoAccessLine: - def test_info_access_log_includes_method_path_status_duration_trace_id(self, monkeypatch, caplog): + def test_info_access_log_includes_method_path_status_duration_trace_id( + self, monkeypatch: pytest.MonkeyPatch, caplog + ): """Ensure INFO access line contains expected fields with computed duration and trace id.""" app = _get_test_app() # Push a real request context so flask.request and g are available @@ -299,7 +301,7 @@ class TestRequestFinishedInfoAccessLine: assert "123.456" in msg # rounded to 3 decimals assert "trace-xyz" in msg - def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch, caplog): + def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch: pytest.MonkeyPatch, caplog): app = _get_test_app() with app.test_request_context("/bar", method="POST"): # No g.__request_started_ts set -> duration should be '-' diff --git a/api/tests/unit_tests/extensions/test_pubsub_channel.py b/api/tests/unit_tests/extensions/test_pubsub_channel.py index 926c406ad4..24bbf55cb3 100644 --- a/api/tests/unit_tests/extensions/test_pubsub_channel.py +++ b/api/tests/unit_tests/extensions/test_pubsub_channel.py @@ -1,10 +1,12 @@ +import pytest + from configs import dify_config from extensions import ext_redis from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel from libs.broadcast_channel.redis.sharded_channel import ShardedRedisBroadcastChannel -def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): +def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) @@ -13,7 +15,7 @@ def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): assert isinstance(channel, 
RedisBroadcastChannel) -def test_get_pubsub_broadcast_channel_sharded(monkeypatch): +def test_get_pubsub_broadcast_channel_sharded(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) diff --git a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py index 8bef01c1ed..7c7f20374e 100644 --- a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py +++ b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py @@ -673,7 +673,7 @@ class TestRedisShardedSubscription: """Test cases for the _RedisShardedSubscription class.""" @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture @@ -889,7 +889,9 @@ class TestRedisShardedSubscription: assert not sharded_subscription._queue.empty() assert sharded_subscription._queue.get_nowait() == b"test sharded payload" - def test_get_message_uses_target_node_for_cluster_client(self, mock_pubsub: MagicMock, monkeypatch): + def test_get_message_uses_target_node_for_cluster_client( + self, mock_pubsub: MagicMock, monkeypatch: pytest.MonkeyPatch + ): """Test that cluster clients use target_node for sharded messages.""" class DummyRedisCluster: @@ -1177,7 +1179,7 @@ class TestRedisSubscriptionCommon: return request.param @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture diff --git a/api/tests/unit_tests/libs/test_archive_storage.py 
b/api/tests/unit_tests/libs/test_archive_storage.py index de3c9c4737..4363c23571 100644 --- a/api/tests/unit_tests/libs/test_archive_storage.py +++ b/api/tests/unit_tests/libs/test_archive_storage.py @@ -34,7 +34,7 @@ def _client_error(code: str) -> ClientError: return ClientError({"Error": {"Code": code}}, "Operation") -def _mock_client(monkeypatch): +def _mock_client(monkeypatch: pytest.MonkeyPatch): client = MagicMock() client.head_bucket.return_value = None # Configure put_object to return a proper ETag that matches the MD5 hash @@ -56,19 +56,19 @@ def _mock_client(monkeypatch): return client, boto_client -def test_init_disabled(monkeypatch): +def test_init_disabled(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENABLED=False) with pytest.raises(ArchiveStorageNotConfiguredError, match="not enabled"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_missing_config(monkeypatch): +def test_init_missing_config(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENDPOINT=None) with pytest.raises(ArchiveStorageNotConfiguredError, match="incomplete"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_not_found(monkeypatch): +def test_init_bucket_not_found(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("404") @@ -77,7 +77,7 @@ def test_init_bucket_not_found(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_access_denied(monkeypatch): +def test_init_bucket_access_denied(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("403") @@ -86,7 +86,7 @@ def test_init_bucket_access_denied(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_other_error(monkeypatch): +def test_init_bucket_other_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) 
client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("500") @@ -95,7 +95,7 @@ def test_init_bucket_other_error(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_sets_client(monkeypatch): +def test_init_sets_client(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, boto_client = _mock_client(monkeypatch) @@ -113,7 +113,7 @@ def test_init_sets_client(monkeypatch): assert storage.bucket == BUCKET_NAME -def test_put_object_returns_checksum(monkeypatch): +def test_put_object_returns_checksum(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -132,7 +132,7 @@ def test_put_object_returns_checksum(monkeypatch): assert checksum == expected_md5 -def test_put_object_raises_on_error(monkeypatch): +def test_put_object_raises_on_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -142,7 +142,7 @@ def test_put_object_raises_on_error(monkeypatch): storage.put_object("key", b"data") -def test_get_object_returns_bytes(monkeypatch): +def test_get_object_returns_bytes(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ -153,7 +153,7 @@ def test_get_object_returns_bytes(monkeypatch): assert storage.get_object("key") == b"payload" -def test_get_object_missing(monkeypatch): +def test_get_object_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -163,7 +163,7 @@ def test_get_object_missing(monkeypatch): storage.get_object("missing") -def test_get_object_stream(monkeypatch): +def test_get_object_stream(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ 
-174,7 +174,7 @@ def test_get_object_stream(monkeypatch): assert list(storage.get_object_stream("key")) == [b"a", b"b"] -def test_get_object_stream_missing(monkeypatch): +def test_get_object_stream_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -184,7 +184,7 @@ def test_get_object_stream_missing(monkeypatch): list(storage.get_object_stream("missing")) -def test_object_exists(monkeypatch): +def test_object_exists(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -194,7 +194,7 @@ def test_object_exists(monkeypatch): assert storage.object_exists("missing") is False -def test_delete_object_error(monkeypatch): +def test_delete_object_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.delete_object.side_effect = _client_error("500") @@ -204,7 +204,7 @@ def test_delete_object_error(monkeypatch): storage.delete_object("key") -def test_list_objects(monkeypatch): +def test_list_objects(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) paginator = MagicMock() @@ -219,7 +219,7 @@ def test_list_objects(monkeypatch): paginator.paginate.assert_called_once_with(Bucket="archive-bucket", Prefix="prefix") -def test_list_objects_error(monkeypatch): +def test_list_objects_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) paginator = MagicMock() @@ -231,7 +231,7 @@ def test_list_objects_error(monkeypatch): storage.list_objects("prefix") -def test_generate_presigned_url(monkeypatch): +def test_generate_presigned_url(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.return_value = "http://signed-url" @@ 
-247,7 +247,7 @@ def test_generate_presigned_url(monkeypatch): assert url == "http://signed-url" -def test_generate_presigned_url_error(monkeypatch): +def test_generate_presigned_url_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.side_effect = _client_error("500") diff --git a/api/tests/unit_tests/libs/test_pandas.py b/api/tests/unit_tests/libs/test_pandas.py index 21c2f0781d..a4739dbbc2 100644 --- a/api/tests/unit_tests/libs/test_pandas.py +++ b/api/tests/unit_tests/libs/test_pandas.py @@ -1,7 +1,8 @@ import pandas as pd +import pytest -def test_pandas_csv(tmp_path, monkeypatch): +def test_pandas_csv(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data) @@ -16,7 +17,7 @@ def test_pandas_csv(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx(tmp_path, monkeypatch): +def test_pandas_xlsx(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data) @@ -31,7 +32,7 @@ def test_pandas_xlsx(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch): +def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data1 = {"col1": [1, 2, 3, 4, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data1) diff --git a/api/tests/unit_tests/libs/test_rate_limiter.py b/api/tests/unit_tests/libs/test_rate_limiter.py index 9d44b07b5e..5052033db8 100644 --- a/api/tests/unit_tests/libs/test_rate_limiter.py +++ b/api/tests/unit_tests/libs/test_rate_limiter.py @@ -1,5 +1,7 @@ from unittest.mock import MagicMock +import pytest + from libs import helper as helper_module @@ -31,7 +33,7 @@ class 
_FakeRedis: return True -def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): +def test_rate_limiter_counts_attempts_within_same_second(monkeypatch: pytest.MonkeyPatch): fake_redis = _FakeRedis() monkeypatch.setattr(helper_module.time, "time", lambda: 1000) @@ -48,7 +50,7 @@ def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): assert limiter.is_rate_limited("203.0.113.10") is True -def test_rate_limiter_uses_injected_redis(monkeypatch): +def test_rate_limiter_uses_injected_redis(monkeypatch: pytest.MonkeyPatch): redis_client = MagicMock() redis_client.zcard.return_value = 1 monkeypatch.setattr(helper_module.time, "time", lambda: 1000) diff --git a/api/tests/unit_tests/libs/test_token.py b/api/tests/unit_tests/libs/test_token.py index 6a65b5faa0..734568d37b 100644 --- a/api/tests/unit_tests/libs/test_token.py +++ b/api/tests/unit_tests/libs/test_token.py @@ -1,5 +1,6 @@ from unittest.mock import MagicMock +import pytest from werkzeug.wrappers import Response from constants import COOKIE_NAME_ACCESS_TOKEN, COOKIE_NAME_WEBAPP_ACCESS_TOKEN @@ -30,7 +31,7 @@ def test_extract_access_token(): assert extract_webapp_access_token(request) == expected_webapp # pyright: ignore[reportArgumentType] -def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): +def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", "", raising=False) @@ -38,7 +39,7 @@ def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): assert token._real_cookie_name("csrf_token") == "__Host-csrf_token" -def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): +def 
test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) @@ -46,7 +47,7 @@ def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): assert token._real_cookie_name("csrf_token") == "csrf_token" -def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch): +def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) diff --git a/api/tests/unit_tests/services/plugin/conftest.py b/api/tests/unit_tests/services/plugin/conftest.py index 80c6077b0c..9dc4fa0390 100644 --- a/api/tests/unit_tests/services/plugin/conftest.py +++ b/api/tests/unit_tests/services/plugin/conftest.py @@ -21,7 +21,7 @@ def make_features( @pytest.fixture -def mock_installer(monkeypatch): +def mock_installer(monkeypatch: pytest.MonkeyPatch): """Patch PluginInstaller at the service import site.""" mock = MagicMock() monkeypatch.setattr("services.plugin.plugin_service.PluginInstaller", lambda: mock) diff --git a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py index 1a2d062208..287391c24c 100644 --- a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py +++ b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py @@ -2,12 +2,13 @@ from types import SimpleNamespace from unittest.mock 
import Mock import pytest +from pytest_mock import MockerFixture from services.rag_pipeline.rag_pipeline_task_proxy import RagPipelineTaskProxy @pytest.fixture -def proxy(mocker): +def proxy(mocker: MockerFixture): """Create a RagPipelineTaskProxy with mocked dependencies.""" mocker.patch("services.rag_pipeline.rag_pipeline_task_proxy.TenantIsolatedTaskQueue") entity = Mock() diff --git a/api/tests/unit_tests/services/test_app_generate_service.py b/api/tests/unit_tests/services/test_app_generate_service.py index d3f9c5dd9f..216c5d9db6 100644 --- a/api/tests/unit_tests/services/test_app_generate_service.py +++ b/api/tests/unit_tests/services/test_app_generate_service.py @@ -20,6 +20,7 @@ from contextlib import contextmanager from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import services.app_generate_service as ags_module from core.app.entities.app_invoke_entities import InvokeFrom @@ -96,7 +97,7 @@ def _noop_rate_limit_context(rate_limit, request_id): class TestBuildStreamingTaskOnSubscribe: """Tests for AppGenerateService._build_streaming_task_on_subscribe.""" - def test_streams_mode_starts_immediately(self, monkeypatch): + def test_streams_mode_starts_immediately(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") called = [] cb = AppGenerateService._build_streaming_task_on_subscribe(lambda: called.append(1)) @@ -106,7 +107,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] # not called again - def test_pubsub_mode_starts_on_subscribe(self, monkeypatch): + def test_pubsub_mode_starts_on_subscribe(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) # large to prevent timer called = [] @@ -118,7 +119,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def 
test_sharded_mode_starts_on_subscribe(self, monkeypatch): + def test_sharded_mode_starts_on_subscribe(self, monkeypatch: pytest.MonkeyPatch): """sharded is treated like pubsub (i.e. not 'streams').""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) @@ -128,7 +129,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def test_pubsub_fallback_timer_fires(self, monkeypatch): + def test_pubsub_fallback_timer_fires(self, monkeypatch: pytest.MonkeyPatch): """When nobody subscribes fast enough the fallback timer fires.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 50) # 50 ms @@ -137,7 +138,7 @@ class TestBuildStreamingTaskOnSubscribe: time.sleep(0.2) # give the timer time to fire assert called == [1] - def test_exception_in_start_task_returns_false(self, monkeypatch): + def test_exception_in_start_task_returns_false(self, monkeypatch: pytest.MonkeyPatch): """When start_task raises, _try_start returns False and next call retries.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") call_count = 0 @@ -154,7 +155,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert call_count == 2 - def test_concurrent_subscribe_only_starts_once(self, monkeypatch): + def test_concurrent_subscribe_only_starts_once(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) call_count = 0 @@ -176,31 +177,31 @@ class TestBuildStreamingTaskOnSubscribe: # _get_max_active_requests # --------------------------------------------------------------------------- class TestGetMaxActiveRequests: - def test_both_zero_returns_zero(self, monkeypatch): + def test_both_zero_returns_zero(self, monkeypatch: 
pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 0 - def test_app_limit_only(self, monkeypatch): + def test_app_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_config_limit_only(self, monkeypatch): + def test_config_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 10) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 10 - def test_both_non_zero_returns_min(self, monkeypatch): + def test_both_non_zero_returns_min(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 20) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_default_active_requests_used_when_app_has_none(self, monkeypatch): + def test_default_active_requests_used_when_app_has_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 15) app = _make_app(AppMode.CHAT, max_active_requests=0) @@ -214,7 +215,7 @@ class TestGenerate: """Tests for AppGenerateService.generate covering each mode.""" @pytest.fixture(autouse=True) - def _common(self, mocker, 
monkeypatch): + def _common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) # Prevent AppExecutionParams.new from touching real models via isinstance @@ -224,7 +225,7 @@ class TestGenerate: ) # -- COMPLETION --------------------------------------------------------- - def test_completion_mode(self, mocker): + def test_completion_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate", return_value={"result": "ok"}, @@ -244,7 +245,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via mode ------------------------------------------------ - def test_agent_chat_mode(self, mocker): + def test_agent_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent"}, @@ -264,7 +265,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via is_agent flag (non-AGENT_CHAT mode) ----------------- - def test_agent_via_is_agent_flag(self, mocker): + def test_agent_via_is_agent_flag(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent-via-flag"}, @@ -285,7 +286,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- CHAT --------------------------------------------------------------- - def test_chat_mode(self, mocker): + def test_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.ChatAppGenerator.generate", return_value={"result": "chat"}, @@ -306,7 +307,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- ADVANCED_CHAT blocking --------------------------------------------- - def test_advanced_chat_blocking(self, mocker): + def test_advanced_chat_blocking(self, mocker: MockerFixture): 
workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) @@ -333,7 +334,7 @@ class TestGenerate: retrieve_spy.assert_not_called() # -- ADVANCED_CHAT streaming -------------------------------------------- - def test_advanced_chat_streaming(self, mocker, monkeypatch): + def test_advanced_chat_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -365,7 +366,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- WORKFLOW blocking -------------------------------------------------- - def test_workflow_blocking(self, mocker): + def test_workflow_blocking(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -390,7 +391,7 @@ class TestGenerate: assert call_kwargs["pause_state_config"].state_owner_user_id == "owner-id" # -- WORKFLOW streaming ------------------------------------------------- - def test_workflow_streaming(self, mocker, monkeypatch): + def test_workflow_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -422,7 +423,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- Invalid mode ------------------------------------------------------- - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app("invalid-mode", is_agent=False) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate( @@ -439,14 +440,14 @@ class TestGenerate: # --------------------------------------------------------------------------- class TestGenerateBilling: @pytest.fixture(autouse=True) - def _common(self, mocker, monkeypatch): + def 
_common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) mocker.patch( "services.app_generate_service.rate_limit_context", _noop_rate_limit_context, ) - def test_billing_enabled_consumes_quota(self, mocker, monkeypatch): + def test_billing_enabled_consumes_quota(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() reserve_mock = mocker.patch( @@ -472,7 +473,9 @@ class TestGenerateBilling: reserve_mock.assert_called_once_with(QuotaType.WORKFLOW, "tenant-id") quota_charge.commit.assert_called_once() - def test_billing_quota_exceeded_raises_rate_limit_error(self, mocker, monkeypatch): + def test_billing_quota_exceeded_raises_rate_limit_error( + self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch + ): from services.errors.app import QuotaExceededError from services.errors.llm import InvokeRateLimitError @@ -491,7 +494,7 @@ class TestGenerateBilling: streaming=False, ) - def test_exception_refunds_quota_and_exits_rate_limit(self, mocker, monkeypatch): + def test_exception_refunds_quota_and_exits_rate_limit(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() mocker.patch( @@ -517,7 +520,9 @@ class TestGenerateBilling: ) quota_charge.refund.assert_called_once() - def test_rate_limit_exit_called_in_finally_for_blocking(self, mocker, monkeypatch): + def test_rate_limit_exit_called_in_finally_for_blocking( + self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch + ): """For non-streaming (blocking) calls, rate_limit.exit should be called in finally.""" monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) @@ -552,7 +557,7 @@ class TestGenerateBilling: # _get_workflow # --------------------------------------------------------------------------- class 
TestGetWorkflow: - def test_debugger_fetches_draft(self, mocker): + def test_debugger_fetches_draft(self, mocker: MockerFixture): draft_wf = _make_workflow() ws = MagicMock() ws.get_draft_workflow.return_value = draft_wf @@ -562,7 +567,7 @@ class TestGetWorkflow: assert result is draft_wf ws.get_draft_workflow.assert_called_once() - def test_debugger_raises_when_no_draft(self, mocker): + def test_debugger_raises_when_no_draft(self, mocker: MockerFixture): ws = MagicMock() ws.get_draft_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -570,7 +575,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not initialized"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.DEBUGGER) - def test_non_debugger_fetches_published(self, mocker): + def test_non_debugger_fetches_published(self, mocker: MockerFixture): pub_wf = _make_workflow() ws = MagicMock() ws.get_published_workflow.return_value = pub_wf @@ -580,7 +585,7 @@ class TestGetWorkflow: assert result is pub_wf ws.get_published_workflow.assert_called_once() - def test_non_debugger_raises_when_no_published(self, mocker): + def test_non_debugger_raises_when_no_published(self, mocker: MockerFixture): ws = MagicMock() ws.get_published_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -588,7 +593,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not published"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API) - def test_specific_workflow_id_valid_uuid(self, mocker): + def test_specific_workflow_id_valid_uuid(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) specific_wf = _make_workflow(workflow_id=valid_uuid) ws = MagicMock() @@ -601,7 +606,7 @@ class TestGetWorkflow: assert result is specific_wf ws.get_published_workflow_by_id.assert_called_once() - def 
test_specific_workflow_id_invalid_uuid(self, mocker): + def test_specific_workflow_id_invalid_uuid(self, mocker: MockerFixture): ws = MagicMock() mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -610,7 +615,7 @@ class TestGetWorkflow: _make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API, workflow_id="not-a-uuid" ) - def test_specific_workflow_id_not_found(self, mocker): + def test_specific_workflow_id_not_found(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) ws = MagicMock() ws.get_published_workflow_by_id.return_value = None @@ -626,7 +631,7 @@ class TestGetWorkflow: # generate_single_iteration # --------------------------------------------------------------------------- class TestGenerateSingleIteration: - def test_advanced_chat_mode(self, mocker): + def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -644,7 +649,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "iteration"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -662,7 +667,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "wf-iteration"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.CHAT) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate_single_iteration(app_model=app, user=_make_user(), node_id="n1", args={}) @@ -672,7 +677,7 @@ class TestGenerateSingleIteration: # generate_single_loop # --------------------------------------------------------------------------- class TestGenerateSingleLoop: - def test_advanced_chat_mode(self, mocker): + 
def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -690,7 +695,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "loop"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -708,7 +713,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "wf-loop"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.COMPLETION) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate_single_loop(app_model=app, user=_make_user(), node_id="n1", args=MagicMock()) @@ -718,7 +723,7 @@ class TestGenerateSingleLoop: # generate_more_like_this # --------------------------------------------------------------------------- class TestGenerateMoreLikeThis: - def test_delegates_to_completion_generator(self, mocker): + def test_delegates_to_completion_generator(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate_more_like_this", return_value={"result": "similar"}, @@ -739,7 +744,7 @@ class TestGenerateMoreLikeThis: # get_response_generator # --------------------------------------------------------------------------- class TestGetResponseGenerator: - def test_non_ended_workflow_run(self, mocker): + def test_non_ended_workflow_run(self, mocker: MockerFixture): app = _make_app(AppMode.ADVANCED_CHAT) workflow_run = MagicMock() workflow_run.id = "run-1" @@ -756,7 +761,7 @@ class TestGetResponseGenerator: result = AppGenerateService.get_response_generator(app_model=app, workflow_run=workflow_run) gen_instance.retrieve_events.assert_called_once() - def 
test_ended_workflow_run_still_returns_generator(self, mocker): + def test_ended_workflow_run_still_returns_generator(self, mocker: MockerFixture): """Even when the run is ended, the current code still returns a generator (TODO branch).""" app = _make_app(AppMode.WORKFLOW) workflow_run = MagicMock() diff --git a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py index 30aa359b45..4293be8f72 100644 --- a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py +++ b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py @@ -89,7 +89,7 @@ class _FakeStreams: @pytest.fixture -def _patch_get_channel_streams(monkeypatch): +def _patch_get_channel_streams(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.streams_channel import StreamsBroadcastChannel fake = _FakeStreams() @@ -108,7 +108,7 @@ def _patch_get_channel_streams(monkeypatch): @pytest.fixture -def _patch_get_channel_pubsub(monkeypatch): +def _patch_get_channel_pubsub(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel store: dict[str, deque[bytes]] = defaultdict(deque) @@ -163,7 +163,7 @@ def test_streams_full_flow_prepublish_and_replay(): @pytest.mark.usefixtures("_patch_get_channel_pubsub") -def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch): +def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch: pytest.MonkeyPatch): # Speed up any potential timer if it accidentally triggers monkeypatch.setattr("services.app_generate_service.SSE_TASK_START_FALLBACK_MS", 50) diff --git a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py index 9a513c3fe6..f5879d973d 100644 --- a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py +++ 
b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py @@ -22,7 +22,7 @@ class FakeLock: @pytest.fixture -def fake_current_user(monkeypatch): +def fake_current_user(monkeypatch: pytest.MonkeyPatch): user = create_autospec(Account, instance=True) user.id = "user-1" user.current_tenant_id = "tenant-1" @@ -31,7 +31,7 @@ def fake_current_user(monkeypatch): @pytest.fixture -def fake_features(monkeypatch): +def fake_features(monkeypatch: pytest.MonkeyPatch): """Features.billing.enabled == False to skip quota logic.""" features = types.SimpleNamespace( billing=types.SimpleNamespace(enabled=False, subscription=types.SimpleNamespace(plan="ENTERPRISE")), @@ -45,7 +45,7 @@ def fake_features(monkeypatch): @pytest.fixture -def fake_lock(monkeypatch): +def fake_lock(monkeypatch: pytest.MonkeyPatch): """Patch redis_client.lock to always raise LockNotOwnedError on enter.""" def _fake_lock(name, timeout=None, *args, **kwargs): @@ -61,7 +61,7 @@ def fake_lock(monkeypatch): def test_save_document_with_dataset_id_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_features, fake_lock, @@ -118,7 +118,7 @@ def test_save_document_with_dataset_id_ignores_lock_not_owned( def test_add_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): @@ -161,7 +161,7 @@ def test_add_segment_ignores_lock_not_owned( def test_multi_create_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): diff --git a/api/tests/unit_tests/services/test_human_input_service.py b/api/tests/unit_tests/services/test_human_input_service.py index 1c686ce838..8e5293a3c5 100644 --- a/api/tests/unit_tests/services/test_human_input_service.py +++ b/api/tests/unit_tests/services/test_human_input_service.py @@ -3,6 +3,7 @@ from datetime import datetime, timedelta from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture 
import services.human_input_service as human_input_service_module from core.repositories.human_input_repository import ( @@ -182,7 +183,9 @@ def test_get_form_definition_by_token_for_console_uses_repository(sample_form_re assert form.get_definition() == console_record.definition -def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_calls_repository_and_enqueue( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -209,7 +212,9 @@ def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, m enqueue_spy.assert_called_once_with(sample_form_record.workflow_run_id) -def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_skips_enqueue_for_delivery_test( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) test_record = dataclasses.replace( @@ -232,7 +237,9 @@ def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record enqueue_spy.assert_not_called() -def test_submit_form_by_token_passes_submission_user_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_passes_submission_user_id( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -470,7 +477,7 @@ def test_form_submitted_error_init(): assert error.code == 412 -def test_human_input_service_init_with_engine(mocker): +def test_human_input_service_init_with_engine(mocker: MockerFixture): engine = 
MagicMock(spec=human_input_service_module.Engine) sessionmaker_mock = mocker.patch("services.human_input_service.sessionmaker") @@ -527,7 +534,7 @@ def test_submit_form_by_token_delivery_not_enabled(mock_session_factory): service.submit_form_by_token(RecipientType.STANDALONE_WEB_APP, "token", "action", {}) -def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker: MockerFixture): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record diff --git a/api/tests/unit_tests/services/test_message_service.py b/api/tests/unit_tests/services/test_message_service.py index 7adc15d63e..51f8b3ef5b 100644 --- a/api/tests/unit_tests/services/test_message_service.py +++ b/api/tests/unit_tests/services/test_message_service.py @@ -906,7 +906,7 @@ class TestMessageServiceSuggestedQuestions: ): """Test successful suggested questions generation in basic Chat mode.""" # Arrange - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) user = factory.create_end_user_mock() message = factory.create_message_mock() mock_get_message.return_value = message @@ -953,7 +953,7 @@ class TestMessageServiceSuggestedQuestions: """Test suggested question generation uses frontend configured model and prompt.""" from core.app.entities.app_invoke_entities import InvokeFrom - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() @@ -1024,7 +1024,7 @@ class TestMessageServiceSuggestedQuestions: factory, ): """Test invalid frontend configured model falls back to tenant default model.""" - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = 
factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() diff --git a/api/tests/unit_tests/services/test_model_load_balancing_service.py b/api/tests/unit_tests/services/test_model_load_balancing_service.py index 3119af40a2..beecf73caa 100644 --- a/api/tests/unit_tests/services/test_model_load_balancing_service.py +++ b/api/tests/unit_tests/services/test_model_load_balancing_service.py @@ -104,7 +104,7 @@ def test_enable_disable_model_load_balancing_should_call_provider_configuration_ service.provider_manager.get_configurations.return_value = {"openai": provider_configuration} # Act - getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) # Assert getattr(provider_configuration, expected_provider_method).assert_called_once_with( @@ -125,7 +125,7 @@ def test_enable_disable_model_load_balancing_should_raise_value_error_when_provi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_raise_value_error_when_provider_missing( @@ -136,7 +136,7 @@ def test_get_load_balancing_configs_should_raise_value_error_when_provider_missi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_for_custom_provider( @@ -177,7 +177,7 @@ def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_fo "tenant-1", "openai", 
"gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, ) # Assert @@ -238,7 +238,7 @@ def test_get_load_balancing_configs_should_reorder_existing_inherit_and_tolerate "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, config_from="predefined-model", ) @@ -259,7 +259,7 @@ def test_get_load_balancing_config_should_raise_value_error_when_provider_missin # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") def test_get_load_balancing_config_should_return_none_when_config_not_found( @@ -272,7 +272,7 @@ def test_get_load_balancing_config_should_return_none_when_config_not_found( mock_db.session.scalar.return_value = None # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result is None @@ -292,7 +292,7 @@ def test_get_load_balancing_config_should_return_obfuscated_payload_when_config_ mock_db.session.scalar.return_value = config # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result == { @@ -335,7 +335,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_provider_mi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [], "custom-model", ) @@ -354,7 +354,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_configs_is_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], "invalid-configs"), "custom-model", ) @@ -375,7 +375,7 
@@ def test_update_load_balancing_configs_should_raise_value_error_when_config_item "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], ["bad-item"]), "custom-model", ) @@ -397,7 +397,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credential_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -418,7 +418,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"enabled": True}], "custom-model", ) @@ -428,7 +428,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "cfg-without-enabled"}], "custom-model", ) @@ -450,7 +450,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_existing_co "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-2", "name": "invalid", "enabled": True}], "custom-model", ) @@ -472,7 +472,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-1", "name": "new", "enabled": True, "credentials": "bad"}], "custom-model", ) @@ -482,7 +482,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new-config", "enabled": True, "credentials": "bad"}], "custom-model", ) @@ -519,7 +519,7 @@ def test_update_load_balancing_configs_should_update_existing_create_new_and_del "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [ {"id": "cfg-1", "name": "updated-name", "enabled": False, "credentials": {"api_key": "plain"}}, {"name": "new-config", 
"enabled": True, "credentials": {"api_key": "plain"}}, @@ -553,7 +553,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "__inherit__", "enabled": True, "credentials": {"api_key": "x"}}], "custom-model", ) @@ -563,7 +563,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new", "enabled": True}], "custom-model", ) @@ -585,7 +585,7 @@ def test_update_load_balancing_configs_should_create_from_existing_provider_cred "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -611,7 +611,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_provi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) @@ -631,7 +631,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_confi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, config_id="cfg-1", ) @@ -654,7 +654,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, config_id="cfg-1", ) @@ -662,7 +662,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) diff --git a/api/tests/unit_tests/services/test_model_provider_service.py b/api/tests/unit_tests/services/test_model_provider_service.py index 28d459eac9..9e4eeb2d6e 100644 --- a/api/tests/unit_tests/services/test_model_provider_service.py +++ b/api/tests/unit_tests/services/test_model_provider_service.py @@ -90,7 +90,7 @@ class 
TestModelProviderServiceConfiguration: ) manager.get_configurations.return_value = {"openai": allowed, "embedding": filtered} - result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM) assert len(result) == 1 assert result[0].provider == "openai" @@ -232,7 +232,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -245,7 +245,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, }, @@ -258,7 +258,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_name": "cred-a", @@ -277,7 +277,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_id": "cred-1", @@ -298,7 +298,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -311,7 +311,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -324,7 +324,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", 
"credential_id": "cred-1", }, @@ -337,7 +337,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", }, "delete_custom_model", @@ -425,7 +425,7 @@ class TestModelProviderServiceListingsAndDefaults: provider_configurations = SimpleNamespace(get_models=MagicMock(return_value=models)) manager.get_configurations.return_value = provider_configurations - result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) provider_configurations.get_models.assert_called_once_with(model_type=ModelType.LLM, only_active=True) assert len(result) == 1 @@ -495,7 +495,7 @@ class TestModelProviderServiceListingsAndDefaults: ), ) - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is not None assert result.model == "gpt-4o" @@ -506,7 +506,7 @@ class TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.return_value = None - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -514,7 +514,7 @@ class TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.side_effect = RuntimeError("boom") - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -523,7 +523,7 @@ class 
TestModelProviderServiceListingsAndDefaults: service.update_default_model_of_model_type( tenant_id="tenant-1", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, provider="openai", model="gpt-4o", ) @@ -593,7 +593,7 @@ class TestModelProviderServiceListingsAndDefaults: tenant_id="tenant-1", provider="openai", model="gpt-4o", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, ) getattr(provider_configuration, provider_method_name).assert_called_once_with( diff --git a/api/tests/unit_tests/services/test_trigger_provider_service.py b/api/tests/unit_tests/services/test_trigger_provider_service.py index 6eba60e5f1..4da4af2d93 100644 --- a/api/tests/unit_tests/services/test_trigger_provider_service.py +++ b/api/tests/unit_tests/services/test_trigger_provider_service.py @@ -325,7 +325,7 @@ def test_update_trigger_subscription_should_raise_error_when_name_conflicts( id="sub-1", name="old", provider_id="langgenius/github/github", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.side_effect = [subscription, object()] # found sub, name conflict _mock_get_trigger_provider(mocker, provider_controller) @@ -350,7 +350,7 @@ def test_update_trigger_subscription_should_update_fields_and_clear_cache( properties={"project": "enc-old"}, parameters={"event": "old"}, credentials={"api_key": "enc-old"}, - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credential_expires_at=0, expires_at=0, ) @@ -456,7 +456,7 @@ def test_delete_trigger_provider_should_delete_and_clear_cache_even_if_unsubscri id="sub-1", user_id="user-1", provider_id=str(provider_id), - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"token": "enc"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -492,7 +492,7 @@ def test_delete_trigger_provider_should_skip_unsubscribe_for_unauthorized( id="sub-2", user_id="user-1", provider_id=str(provider_id), 
- credential_type=CredentialType.UNAUTHORIZED.value, + credential_type=CredentialType.UNAUTHORIZED, credentials={}, to_entity=lambda: SimpleNamespace(id="sub-2"), ) @@ -527,7 +527,7 @@ def test_refresh_oauth_token_should_raise_error_for_non_oauth_credentials( mocker: MockerFixture, mock_session: MagicMock ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY) mock_session.scalar.return_value = subscription # Act + Assert @@ -545,7 +545,7 @@ def test_refresh_oauth_token_should_refresh_and_persist_new_credentials( subscription = SimpleNamespace( provider_id=str(provider_id), user_id="user-1", - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"access_token": "enc"}, credential_expires_at=0, ) @@ -613,7 +613,7 @@ def test_refresh_subscription_should_refresh_and_persist_properties( parameters={"event": "push"}, properties={"p": "enc"}, credentials={"c": "enc"}, - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.return_value = subscription _mock_get_trigger_provider(mocker, provider_controller) @@ -989,7 +989,7 @@ def test_verify_subscription_credentials_should_raise_when_api_key_validation_fa provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) provider_controller.validate_credentials.side_effect = RuntimeError("bad credentials") @@ -1012,7 +1012,7 @@ def test_verify_subscription_credentials_should_return_verified_when_api_key_val provider_controller: MagicMock, ) -> None: # Arrange 
- subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1036,7 +1036,7 @@ def test_verify_subscription_credentials_should_return_verified_for_non_api_key_ provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2.value, credentials={}) + subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2, credentials={}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1100,7 +1100,7 @@ def test_rebuild_trigger_subscription_should_raise_for_unsupported_credential_ty provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED.value) + subscription = SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1126,7 +1126,7 @@ def test_rebuild_trigger_subscription_should_raise_when_unsubscribe_fails( id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -1159,7 +1159,7 @@ def test_rebuild_trigger_subscription_should_resubscribe_and_update_existing_sub id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old-key"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) diff --git 
a/api/tests/unit_tests/services/test_webhook_service.py b/api/tests/unit_tests/services/test_webhook_service.py index ffdcc046f9..95edc436d7 100644 --- a/api/tests/unit_tests/services/test_webhook_service.py +++ b/api/tests/unit_tests/services/test_webhook_service.py @@ -140,7 +140,7 @@ class TestWebhookServiceUnit: assert args[1] == "text/plain" assert args[2] is webhook_trigger - def test_detect_binary_mimetype_uses_magic(self, monkeypatch): + def test_detect_binary_mimetype_uses_magic(self, monkeypatch: pytest.MonkeyPatch): """python-magic output should be used when available.""" fake_magic = MagicMock() fake_magic.from_buffer.return_value = "image/png" @@ -151,7 +151,7 @@ class TestWebhookServiceUnit: assert result == "image/png" fake_magic.from_buffer.assert_called_once() - def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch): + def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch: pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic is unavailable.""" monkeypatch.setattr("services.trigger.webhook_service.magic", None) @@ -159,7 +159,7 @@ class TestWebhookServiceUnit: assert result == "application/octet-stream" - def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch): + def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch: pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic raises an exception.""" try: import magic as real_magic diff --git a/api/tests/unit_tests/services/test_workflow_service.py b/api/tests/unit_tests/services/test_workflow_service.py index 50b4bfc5f8..642a459e0b 100644 --- a/api/tests/unit_tests/services/test_workflow_service.py +++ b/api/tests/unit_tests/services/test_workflow_service.py @@ -61,7 +61,7 @@ class TestWorkflowAssociatedDataFactory: def create_app_mock( app_id: str = "app-123", tenant_id: str = "tenant-456", - mode: str = AppMode.WORKFLOW.value, + mode: str = AppMode.WORKFLOW, workflow_id: str | None = 
None, **kwargs, ) -> MagicMock: @@ -93,7 +93,7 @@ class TestWorkflowAssociatedDataFactory: tenant_id: str = "tenant-456", app_id: str = "app-123", version: str = Workflow.VERSION_DRAFT, - workflow_type: str = WorkflowType.WORKFLOW.value, + workflow_type: str = WorkflowType.WORKFLOW, graph: dict[str, Any] | None = None, features: dict[str, Any] | None = None, unique_hash: str | None = None, @@ -584,7 +584,7 @@ class TestWorkflowService: id="published-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version="2026-03-19T00:00:00", graph=json.dumps(TestWorkflowAssociatedDataFactory.create_valid_workflow_graph()), features=json.dumps(legacy_features), @@ -597,7 +597,7 @@ class TestWorkflowService: id="draft-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version=Workflow.VERSION_DRAFT, graph=json.dumps({"nodes": [], "edges": []}), features=json.dumps({}), @@ -685,7 +685,7 @@ class TestWorkflowService: Different app modes have different feature configurations. This ensures the features match the expected schema for workflow apps. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) features = {"file_upload": {"enabled": False}} with patch("services.workflow_service.WorkflowAppConfigManager.config_validate") as mock_validate: @@ -696,7 +696,7 @@ class TestWorkflowService: def test_validate_features_structure_advanced_chat_mode(self, workflow_service): """Test validate_features_structure for advanced chat mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT) features = {"opening_statement": "Hello"} with patch("services.workflow_service.AdvancedChatAppConfigManager.config_validate") as mock_validate: @@ -707,7 +707,7 @@ class TestWorkflowService: def test_validate_features_structure_invalid_mode_raises_error(self, workflow_service): """Test validate_features_structure raises error for invalid mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) features = {} with pytest.raises(ValueError, match="Invalid app mode"): @@ -1326,7 +1326,7 @@ class TestWorkflowService: The conversion creates equivalent workflow nodes from the chat configuration, giving users more control and customization options. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = { "name": "Converted Workflow", @@ -1337,7 +1337,7 @@ class TestWorkflowService: with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1353,13 +1353,13 @@ class TestWorkflowService: Completion apps are simpler (single prompt-response), so the conversion creates a basic workflow with fewer nodes. """ - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {"name": "Converted Workflow"} with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1373,7 +1373,7 @@ class TestWorkflowService: Only chat and completion apps can be converted to workflows. Apps that are already workflows or have other modes cannot be converted. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {} @@ -2087,7 +2087,7 @@ class TestSetupVariablePool: This helper initialises the VariablePool used for single-step workflow execution. """ - def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW.value) -> MagicMock: + def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW) -> MagicMock: wf = MagicMock(spec=Workflow) wf.app_id = "app-1" wf.id = "wf-1" @@ -2176,7 +2176,7 @@ class TestSetupVariablePool: from models.workflow import WorkflowType # Arrange - workflow = self._make_workflow(workflow_type=WorkflowType.CHAT.value) + workflow = self._make_workflow(workflow_type=WorkflowType.CHAT) # Act with ( diff --git a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py index 663eec6a06..b5b9f0bd97 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py @@ -398,7 +398,7 @@ class TestWorkflowDraftVariableService: self, mock_engine, mock_session, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable when execution record doesn't exist""" mock_repo_session = Mock(spec=Session) @@ -435,7 +435,7 @@ class TestWorkflowDraftVariableService: def test_reset_node_variable_with_valid_execution_record( self, mock_session, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable with valid execution record - should restore from execution""" mock_repo_session = Mock(spec=Session) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py 
b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py index dfdbd9acd6..17e9a077d6 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py @@ -414,8 +414,8 @@ def test_parse_event_message_should_parse_only_json_object( def test_is_terminal_event_should_recognize_finished_and_optional_paused_events() -> None: # Arrange - finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} # Act is_finished = service_module._is_terminal_event(finished_event, close_on_pause=False) @@ -426,7 +426,7 @@ def test_is_terminal_event_should_recognize_finished_and_optional_paused_events( assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, close_on_pause=True) is False + assert service_module._is_terminal_event(StreamEvent.PING, close_on_pause=True) is False def test_apply_message_context_should_update_payload_when_context_exists() -> None: @@ -569,7 +569,7 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) # Act @@ -584,9 +584,9 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even ) # Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == 
StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -643,7 +643,7 @@ def test_build_workflow_event_stream_should_emit_periodic_ping_and_stop_after_id ) # Assert - assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert buffer_state.stop_event.is_set() is True @@ -686,7 +686,7 @@ def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( ) # Assert - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True @@ -706,7 +706,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -729,7 +729,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( ) # Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None @@ -779,7 +779,7 @@ def test_build_snapshot_events_preserves_public_form_token(monkeypatch: pytest.M session_maker=cast(sessionmaker[Session], session_maker), ) - assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED.value + assert events[-2]["event"] == 
StreamEvent.HUMAN_INPUT_REQUIRED assert events[-2]["data"]["form_token"] == "wtok" assert events[-2]["data"]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) pause_data = events[-1]["data"] @@ -837,6 +837,6 @@ def test_build_workflow_event_stream_loads_pause_tokens_without_flask_app_contex ) pause_event = cast(Mapping[str, Any], events[-1]) - assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED.value + assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED assert pause_event["data"]["reasons"][0]["form_token"] == "wtok" assert pause_event["data"]["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py index d2634d7d7b..4d711f1bf8 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py @@ -215,8 +215,8 @@ class TestWorkflowEventSnapshotHelpers: assert result == expected def test_is_terminal_event_should_recognize_finished_and_optional_paused_events(self) -> None: - finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} is_finished = service_module._is_terminal_event(finished_event, include_paused=False) paused_without_flag = service_module._is_terminal_event(paused_event, include_paused=False) @@ -225,7 +225,7 @@ class TestWorkflowEventSnapshotHelpers: assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, include_paused=True) is False + assert 
service_module._is_terminal_event(StreamEvent.PING, include_paused=True) is False def test_apply_message_context_should_update_payload_when_context_exists(self) -> None: payload: dict[str, Any] = {"event": "workflow_started"} @@ -352,7 +352,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) events = list( @@ -365,9 +365,9 @@ class TestBuildWorkflowEventStream: ) ) - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -421,7 +421,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( @@ -461,7 +461,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( @@ -480,7 +480,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", 
MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -501,5 +501,5 @@ class TestBuildWorkflowEventStream: ) ) - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None diff --git a/api/tests/unit_tests/tasks/test_workflow_execute_task.py b/api/tests/unit_tests/tasks/test_workflow_execute_task.py index 72508bef52..2544c9d61a 100644 --- a/api/tests/unit_tests/tasks/test_workflow_execute_task.py +++ b/api/tests/unit_tests/tasks/test_workflow_execute_task.py @@ -122,7 +122,7 @@ def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(m workflow_run = SimpleNamespace( workflow_id="wf-id", app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) @@ -208,7 +208,7 @@ def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversat workflow_run = SimpleNamespace( workflow_id="wf-id", app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) diff --git a/api/uv.lock b/api/uv.lock index 59989282eb..b41caea7f9 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -50,7 +50,10 @@ members = [ "dify-vdb-vikingdb", "dify-vdb-weaviate", ] -overrides = [{ name = "pyarrow", specifier = ">=18.0.0" }] +overrides = [ + { name = "litellm", specifier = ">=1.83.7" }, + { name = "pyarrow", specifier = ">=18.0.0" }, +] [[package]] name = "abnf" @@ -889,14 +892,14 @@ wheels = [ [[package]] name = "click" -version = "8.3.1" +version = "8.1.8" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, ] [[package]] @@ -3351,14 +3354,14 @@ wheels = [ [[package]] name = "importlib-metadata" -version = "8.4.0" +version = "8.5.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c0/bd/fa8ce65b0a7d4b6d143ec23b0f5fd3f7ab80121078c465bc02baeaab22dc/importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5", size = 54320, upload-time = "2024-08-20T17:11:42.348Z" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = 
"sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304, upload-time = "2024-09-11T14:56:08.937Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/14/362d31bf1076b21e1bcdcb0dc61944822ff263937b804a79231df2774d28/importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1", size = 26269, upload-time = "2024-08-20T17:11:41.102Z" }, + { url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514, upload-time = "2024-09-11T14:56:07.019Z" }, ] [[package]] @@ -3499,7 +3502,7 @@ wheels = [ [[package]] name = "jsonschema" -version = "4.25.1" +version = "4.23.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -3507,9 +3510,9 @@ dependencies = [ { name = "referencing" }, { name = "rpds-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +sdist = { url = "https://files.pythonhosted.org/packages/38/2e/03362ee4034a4c917f697890ccd4aec0800ccf9ded7f511971c75451deec/jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4", size = 325778, upload-time = "2024-07-08T18:40:05.546Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, + { url = 
"https://files.pythonhosted.org/packages/69/4a/4f9dbeb84e8850557c02365a0eee0649abe5eb1d84af92a25731c6c0f922/jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566", size = 88462, upload-time = "2024-07-08T18:40:00.165Z" }, ] [[package]] @@ -3650,7 +3653,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.83.0" +version = "1.83.14" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -3666,9 +3669,9 @@ dependencies = [ { name = "tiktoken" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/22/92/6ce9737554994ca8e536e5f4f6a87cc7c4774b656c9eb9add071caf7d54b/litellm-1.83.0.tar.gz", hash = "sha256:860bebc76c4bb27b4cf90b4a77acd66dba25aced37e3db98750de8a1766bfb7a", size = 17333062, upload-time = "2026-03-31T05:08:25.331Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/7c/c095649380adc96c8630273c1768c2ad1e74aa2ee1dd8dd05d218a60569f/litellm-1.83.14.tar.gz", hash = "sha256:24aef9b47cdc424c833e32f3727f411741c690832cd1fe4405e0077144fe09c9", size = 14836599, upload-time = "2026-04-26T03:16:10.176Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/19/2c/a670cc050fcd6f45c6199eb99e259c73aea92edba8d5c2fc1b3686d36217/litellm-1.83.0-py3-none-any.whl", hash = "sha256:88c536d339248f3987571493015784671ba3f193a328e1ea6780dbebaa2094a8", size = 15610306, upload-time = "2026-03-31T05:08:21.987Z" }, + { url = "https://files.pythonhosted.org/packages/7f/5c/1b5691575420135e90578543b2bf219497caa33cfd0af64cb38f30288450/litellm-1.83.14-py3-none-any.whl", hash = "sha256:92b11ba2a32cf80707ddf388d18526696c7999a21b418c5e3b6eda1243d2cfdb", size = 16457054, upload-time = "2026-04-26T03:16:05.72Z" }, ] [[package]] @@ -4131,7 +4134,7 @@ wheels = [ [[package]] name = "openai" -version = "2.8.1" +version = "2.24.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -4143,9 +4146,9 @@ dependencies 
= [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d5/e4/42591e356f1d53c568418dc7e30dcda7be31dd5a4d570bca22acb0525862/openai-2.8.1.tar.gz", hash = "sha256:cb1b79eef6e809f6da326a7ef6038719e35aa944c42d081807bfa1be8060f15f", size = 602490, upload-time = "2025-11-17T22:39:59.549Z" } +sdist = { url = "https://files.pythonhosted.org/packages/55/13/17e87641b89b74552ed408a92b231283786523edddc95f3545809fab673c/openai-2.24.0.tar.gz", hash = "sha256:1e5769f540dbd01cb33bc4716a23e67b9d695161a734aff9c5f925e2bf99a673", size = 658717, upload-time = "2026-02-24T20:02:07.958Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/4f/dbc0c124c40cb390508a82770fb9f6e3ed162560181a85089191a851c59a/openai-2.8.1-py3-none-any.whl", hash = "sha256:c6c3b5a04994734386e8dad3c00a393f56d3b68a27cd2e8acae91a59e4122463", size = 1022688, upload-time = "2025-11-17T22:39:57.675Z" }, + { url = "https://files.pythonhosted.org/packages/c9/30/844dc675ee6902579b8eef01ed23917cc9319a1c9c0c14ec6e39340c96d0/openai-2.24.0-py3-none-any.whl", hash = "sha256:fed30480d7d6c884303287bde864980a4b137b60553ffbcf9ab4a233b7a73d94", size = 1120122, upload-time = "2026-02-24T20:02:05.669Z" }, ] [[package]] @@ -6443,27 +6446,28 @@ wheels = [ [[package]] name = "tokenizers" -version = "0.22.1" +version = "0.22.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } +sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, 
upload-time = "2026-01-05T10:45:15.988Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/33/f4b2d94ada7ab297328fc671fed209368ddb82f965ec2224eb1892674c3a/tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73", size = 3069318, upload-time = "2025-09-19T09:49:11.848Z" }, - { url = "https://files.pythonhosted.org/packages/1c/58/2aa8c874d02b974990e89ff95826a4852a8b2a273c7d1b4411cdd45a4565/tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc", size = 2926478, upload-time = "2025-09-19T09:49:09.759Z" }, - { url = "https://files.pythonhosted.org/packages/1e/3b/55e64befa1e7bfea963cf4b787b2cea1011362c4193f5477047532ce127e/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a", size = 3256994, upload-time = "2025-09-19T09:48:56.701Z" }, - { url = "https://files.pythonhosted.org/packages/71/0b/fbfecf42f67d9b7b80fde4aabb2b3110a97fac6585c9470b5bff103a80cb/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7", size = 3153141, upload-time = "2025-09-19T09:48:59.749Z" }, - { url = "https://files.pythonhosted.org/packages/17/a9/b38f4e74e0817af8f8ef925507c63c6ae8171e3c4cb2d5d4624bf58fca69/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21", size = 3508049, upload-time = "2025-09-19T09:49:05.868Z" }, - { url = "https://files.pythonhosted.org/packages/d2/48/dd2b3dac46bb9134a88e35d72e1aa4869579eacc1a27238f1577270773ff/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214", size = 3710730, upload-time = 
"2025-09-19T09:49:01.832Z" }, - { url = "https://files.pythonhosted.org/packages/93/0e/ccabc8d16ae4ba84a55d41345207c1e2ea88784651a5a487547d80851398/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f", size = 3412560, upload-time = "2025-09-19T09:49:03.867Z" }, - { url = "https://files.pythonhosted.org/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4", size = 3250221, upload-time = "2025-09-19T09:49:07.664Z" }, - { url = "https://files.pythonhosted.org/packages/d7/a6/2c8486eef79671601ff57b093889a345dd3d576713ef047776015dc66de7/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879", size = 9345569, upload-time = "2025-09-19T09:49:14.214Z" }, - { url = "https://files.pythonhosted.org/packages/6b/16/32ce667f14c35537f5f605fe9bea3e415ea1b0a646389d2295ec348d5657/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446", size = 9271599, upload-time = "2025-09-19T09:49:16.639Z" }, - { url = "https://files.pythonhosted.org/packages/51/7c/a5f7898a3f6baa3fc2685c705e04c98c1094c523051c805cdd9306b8f87e/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a", size = 9533862, upload-time = "2025-09-19T09:49:19.146Z" }, - { url = "https://files.pythonhosted.org/packages/36/65/7e75caea90bc73c1dd8d40438adf1a7bc26af3b8d0a6705ea190462506e1/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390", size = 9681250, upload-time = "2025-09-19T09:49:21.501Z" }, - { url = 
"https://files.pythonhosted.org/packages/30/2c/959dddef581b46e6209da82df3b78471e96260e2bc463f89d23b1bf0e52a/tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82", size = 2472003, upload-time = "2025-09-19T09:49:27.089Z" }, - { url = "https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" }, + { url = "https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, upload-time = "2026-01-05T10:41:00.276Z" }, + { url = "https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" }, + { url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" }, + { url = "https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" }, + { url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" }, + { url = "https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = "2026-01-05T10:40:58.331Z" }, + { url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" }, + { url = "https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" }, + { url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" }, + { url = "https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" }, + { url = "https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" }, ] [[package]]