diff --git a/.agents/skills/frontend-query-mutation/SKILL.md b/.agents/skills/frontend-query-mutation/SKILL.md index 49888bdb66..10c49d222e 100644 --- a/.agents/skills/frontend-query-mutation/SKILL.md +++ b/.agents/skills/frontend-query-mutation/SKILL.md @@ -1,6 +1,6 @@ --- name: frontend-query-mutation -description: Guide for implementing Dify frontend query and mutation patterns with TanStack Query and oRPC. Trigger when creating or updating contracts in web/contract, wiring router composition, consuming consoleQuery or marketplaceQuery in components or services, deciding whether to call queryOptions() directly or extract a helper or use-* hook, handling conditional queries, cache invalidation, mutation error handling, or migrating legacy service calls to contract-first query and mutation helpers. +description: Guide for implementing Dify frontend query and mutation patterns with TanStack Query and oRPC. Trigger when creating or updating contracts in web/contract, wiring router composition, consuming consoleQuery or marketplaceQuery in components or services, deciding whether to call queryOptions()/mutationOptions() directly or extract a helper or use-* hook, configuring oRPC experimental_defaults/default options, handling conditional queries, cache updates/invalidation, mutation error handling, or migrating legacy service calls to contract-first query and mutation helpers. --- # Frontend Query & Mutation @@ -9,22 +9,24 @@ description: Guide for implementing Dify frontend query and mutation patterns wi - Keep contract as the single source of truth in `web/contract/*`. - Prefer contract-shaped `queryOptions()` and `mutationOptions()`. -- Keep invalidation and mutation flow knowledge in the service layer. +- Keep default cache behavior with `consoleQuery`/`marketplaceQuery` setup, and keep business orchestration in feature vertical hooks when direct contract calls are not enough. 
+- Treat `web/service/use-*` query or mutation wrappers as legacy migration targets, not the preferred destination. - Keep abstractions minimal to preserve TypeScript inference. ## Workflow 1. Identify the change surface. - Read `references/contract-patterns.md` for contract files, router composition, client helpers, and query or mutation call-site shape. - - Read `references/runtime-rules.md` for conditional queries, invalidation, error handling, and legacy migrations. + - Read `references/runtime-rules.md` for conditional queries, default options, cache updates/invalidation, error handling, and legacy migrations. - Read both references when a task spans contract shape and runtime behavior. 2. Implement the smallest abstraction that fits the task. - Default to direct `useQuery(...)` or `useMutation(...)` calls with oRPC helpers at the call site. - Extract a small shared query helper only when multiple call sites share the same extra options. - - Create `web/service/use-{domain}.ts` only for orchestration or shared domain behavior. + - Create or keep feature hooks only for real orchestration or shared domain behavior. + - When touching thin `web/service/use-*` wrappers, migrate them away when feasible. 3. Preserve Dify conventions. - Keep contract inputs in `{ params, query?, body? }` shape. - - Bind invalidation in the service-layer mutation definition. + - Bind default cache updates/invalidation in `createTanstackQueryUtils(...experimental_defaults...)`; use feature hooks only for workflows that cannot be expressed as default operation behavior. - Prefer `mutate(...)`; use `mutateAsync(...)` only when Promise semantics are required. 
## Files Commonly Touched @@ -33,7 +35,7 @@ description: Guide for implementing Dify frontend query and mutation patterns wi - `web/contract/marketplace.ts` - `web/contract/router.ts` - `web/service/client.ts` -- `web/service/use-*.ts` +- legacy `web/service/use-*.ts` files when migrating wrappers away - component and hook call sites using `consoleQuery` or `marketplaceQuery` ## References diff --git a/.agents/skills/frontend-query-mutation/agents/openai.yaml b/.agents/skills/frontend-query-mutation/agents/openai.yaml index 87f7ae6ea4..79e7e7d214 100644 --- a/.agents/skills/frontend-query-mutation/agents/openai.yaml +++ b/.agents/skills/frontend-query-mutation/agents/openai.yaml @@ -1,4 +1,4 @@ interface: display_name: "Frontend Query & Mutation" - short_description: "Dify TanStack Query and oRPC patterns" - default_prompt: "Use this skill when implementing or reviewing Dify frontend contracts, query and mutation call sites, conditional queries, invalidation, or legacy query/mutation migrations." + short_description: "Dify TanStack Query, oRPC, and default option patterns" + default_prompt: "Use this skill when implementing or reviewing Dify frontend contracts, query and mutation call sites, oRPC default options, conditional queries, cache updates/invalidation, or legacy query/mutation migrations." diff --git a/.agents/skills/frontend-query-mutation/references/contract-patterns.md b/.agents/skills/frontend-query-mutation/references/contract-patterns.md index 08016ed2cc..25ccfc81d7 100644 --- a/.agents/skills/frontend-query-mutation/references/contract-patterns.md +++ b/.agents/skills/frontend-query-mutation/references/contract-patterns.md @@ -7,6 +7,7 @@ - Core workflow - Query usage decision rule - Mutation usage decision rule +- Thin hook decision rule - Anti-patterns - Contract rules - Type export @@ -55,9 +56,13 @@ const invoiceQuery = useQuery(consoleQuery.billing.invoices.queryOptions({ 1. Default to direct `*.queryOptions(...)` usage at the call site. 2. 
If 3 or more call sites share the same extra options, extract a small query helper, not a `use-*` passthrough hook. -3. Create `web/service/use-{domain}.ts` only for orchestration. +3. Create or keep feature hooks only for orchestration. - Combine multiple queries or mutations. - Share domain-level derived state or invalidation helpers. + - Prefer `web/features/{domain}/hooks/*` for feature-owned workflows. +4. Treat `web/service/use-{domain}.ts` as legacy. + - Do not create new thin service wrappers for oRPC contracts. + - When touching existing wrappers, inline direct `consoleQuery` or `marketplaceQuery` consumption when the wrapper is only a passthrough. ```typescript const invoicesBaseQueryOptions = () => @@ -74,11 +79,37 @@ const invoiceQuery = useQuery({ 1. Default to mutation helpers from `consoleQuery` or `marketplaceQuery`, for example `useMutation(consoleQuery.billing.bindPartnerStack.mutationOptions(...))`. 2. If the mutation flow is heavily custom, use oRPC clients as `mutationFn`, for example `consoleClient.xxx` or `marketplaceClient.xxx`, instead of handwritten non-oRPC mutation logic. +```typescript +const createTagMutation = useMutation(consoleQuery.tags.create.mutationOptions()) +``` + +## Thin Hook Decision Rule + +Remove thin hooks when they only rename a single oRPC query or mutation helper. +Keep hooks when they orchestrate business behavior across multiple operations, own local workflow state, or normalize a feature-specific API. +Prefer feature vertical hooks for kept orchestration. Do not move new contract-first wrappers into `web/service/use-*`. + +Use: + +```typescript +const deleteTagMutation = useMutation(consoleQuery.tags.delete.mutationOptions()) +``` + +Keep: + +```typescript +const applyTagBindingsMutation = useApplyTagBindingsMutation() +``` + +`useApplyTagBindingsMutation` is acceptable because it coordinates bind and unbind requests, computes deltas, and exposes a feature-level workflow rather than a single endpoint passthrough. 
+ ## Anti-Patterns - Do not wrap `useQuery` with `options?: Partial`. - Do not split local `queryKey` and `queryFn` when oRPC `queryOptions` already exists and fits the use case. - Do not create thin `use-*` passthrough hooks for a single endpoint. +- Do not create business-layer helpers whose only purpose is to call `consoleQuery.xxx.mutationOptions()` or `queryOptions()`. +- Do not introduce new `web/service/use-*` files for oRPC contract passthroughs. - These patterns can degrade inference, especially around `throwOnError` and `select`, and add unnecessary indirection. ## Contract Rules diff --git a/.agents/skills/frontend-query-mutation/references/runtime-rules.md b/.agents/skills/frontend-query-mutation/references/runtime-rules.md index 73d6fbdded..91b484d438 100644 --- a/.agents/skills/frontend-query-mutation/references/runtime-rules.md +++ b/.agents/skills/frontend-query-mutation/references/runtime-rules.md @@ -3,6 +3,7 @@ ## Table of Contents - Conditional queries +- oRPC default options - Cache invalidation - Key API guide - `mutate` vs `mutateAsync` @@ -35,9 +36,50 @@ function useBadAccessMode(appId: string | undefined) { } ``` +## oRPC Default Options + +Use `experimental_defaults` in `createTanstackQueryUtils` when a contract operation should always carry shared TanStack Query behavior, such as default stale time, mutation cache writes, or invalidation. + +Place defaults at the query utility creation point in `web/service/client.ts`: + +```typescript +export const consoleQuery = createTanstackQueryUtils(consoleClient, { + path: ['console'], + experimental_defaults: { + tags: { + create: { + mutationOptions: { + onSuccess: (tag, _variables, _result, context) => { + context.client.setQueryData( + consoleQuery.tags.list.queryKey({ + input: { + query: { + type: tag.type, + }, + }, + }), + (oldTags: Tag[] | undefined) => oldTags ? 
[tag, ...oldTags] : oldTags, + ) + }, + }, + }, + }, + }, +}) +``` + +Rules: + +- Keep defaults inline in the `consoleQuery` or `marketplaceQuery` initialization when they need sibling oRPC key builders. +- Do not create a wrapper function solely to host `createTanstackQueryUtils`. +- Do not split defaults into a vertical feature file if that forces handwritten operation paths such as `generateOperationKey(['console', ...])`. +- Keep feature-level orchestration in the feature vertical; keep query utility lifecycle defaults with the query utility. +- Prefer call-site callbacks for UI feedback only; shared cache behavior belongs in oRPC defaults when it is tied to a contract operation. + ## Cache Invalidation -Bind invalidation in the service-layer mutation definition. +Bind shared invalidation in oRPC defaults when it is tied to a contract operation. +Use feature vertical hooks only for multi-operation workflows or domain orchestration that cannot live in a single operation default. Components may add UI feedback in call-site callbacks, but they should not decide which queries to invalidate. Use: @@ -49,7 +91,7 @@ Use: Do not use deprecated `useInvalid` from `use-base.ts`. ```typescript -// Service layer owns cache invalidation. +// Feature orchestration owns cache invalidation only when defaults are not enough. 
export const useUpdateAccessMode = () => { const queryClient = useQueryClient() @@ -124,7 +166,7 @@ When touching old code, migrate it toward these rules: | Old pattern | New pattern | |---|---| -| `useInvalid(key)` in service layer | `queryClient.invalidateQueries(...)` inside mutation `onSuccess` | -| component-triggered invalidation after mutation | move invalidation into the service-layer mutation definition | +| `useInvalid(key)` in service wrappers | oRPC defaults, or a feature vertical hook for real orchestration | +| component-triggered invalidation after mutation | move invalidation into oRPC defaults or a feature vertical hook | | imperative fetch plus manual invalidation | wrap it in `useMutation(...mutationOptions(...))` | | `await mutateAsync()` without `try/catch` | switch to `mutate(...)` or add `try/catch` | diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index bd47abc710..a08e7aacae 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -99,7 +99,7 @@ jobs: - name: Set up dotenvs run: | cp docker/.env.example docker/.env - cp docker/middleware.env.example docker/middleware.env + cp docker/envs/middleware.env.example docker/middleware.env - name: Expose Service Ports run: sh .github/workflows/expose_service_ports.sh diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index 76fbd18f47..9c2c6e2ca9 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -116,6 +116,12 @@ jobs: if: github.event_name != 'merge_group' uses: ./.github/actions/setup-web + - name: Generate API docs + if: github.event_name != 'merge_group' && steps.api-changes.outputs.any_changed == 'true' + run: | + cd api + uv run dev/generate_swagger_markdown_docs.py --swagger-dir openapi --markdown-dir openapi/markdown + - name: ESLint autofix if: github.event_name != 'merge_group' && steps.web-changes.outputs.any_changed == 'true' run: | diff --git 
a/.github/workflows/db-migration-test.yml b/.github/workflows/db-migration-test.yml index 65f0149a74..9d3ccb34b2 100644 --- a/.github/workflows/db-migration-test.yml +++ b/.github/workflows/db-migration-test.yml @@ -37,7 +37,7 @@ jobs: - name: Prepare middleware env run: | cd docker - cp middleware.env.example middleware.env + cp envs/middleware.env.example middleware.env - name: Set up Middlewares uses: hoverkraft-tech/compose-action@d2bee4f07e8ca410d6b196d00f90c12e7d48c33a # v2.6.0 @@ -87,7 +87,7 @@ jobs: - name: Prepare middleware env for MySQL run: | cd docker - cp middleware.env.example middleware.env + cp envs/middleware.env.example middleware.env sed -i 's/DB_TYPE=postgresql/DB_TYPE=mysql/' middleware.env sed -i 's/DB_HOST=db_postgres/DB_HOST=db_mysql/' middleware.env sed -i 's/DB_PORT=5432/DB_PORT=3306/' middleware.env diff --git a/.github/workflows/main-ci.yml b/.github/workflows/main-ci.yml index 8071d6204d..f624e8f872 100644 --- a/.github/workflows/main-ci.yml +++ b/.github/workflows/main-ci.yml @@ -57,7 +57,7 @@ jobs: - '.github/workflows/api-tests.yml' - '.github/workflows/expose_service_ports.sh' - 'docker/.env.example' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - 'docker/docker-compose.middleware.yaml' - 'docker/docker-compose-template.yaml' - 'docker/generate_docker_compose' @@ -84,7 +84,7 @@ jobs: - 'pnpm-workspace.yaml' - '.nvmrc' - 'docker/docker-compose.middleware.yaml' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - '.github/workflows/web-e2e.yml' - '.github/actions/setup-web/**' vdb: @@ -94,7 +94,7 @@ jobs: - '.github/workflows/vdb-tests.yml' - '.github/workflows/expose_service_ports.sh' - 'docker/.env.example' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - 'docker/docker-compose.yaml' - 'docker/docker-compose-template.yaml' - 'docker/generate_docker_compose' @@ -116,7 +116,7 @@ jobs: - '.github/workflows/db-migration-test.yml' - 
'.github/workflows/expose_service_ports.sh' - 'docker/.env.example' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - 'docker/docker-compose.middleware.yaml' - 'docker/docker-compose-template.yaml' - 'docker/generate_docker_compose' diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 3428516b60..9d4d9d1ef4 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -107,6 +107,8 @@ jobs: - name: Web tsslint if: steps.changed-files.outputs.any_changed == 'true' working-directory: ./web + env: + NODE_OPTIONS: --max-old-space-size=4096 run: vp run lint:tss - name: Web type check diff --git a/.github/workflows/vdb-tests-full.yml b/.github/workflows/vdb-tests-full.yml index 5c241af5c5..1405eb4eeb 100644 --- a/.github/workflows/vdb-tests-full.yml +++ b/.github/workflows/vdb-tests-full.yml @@ -51,7 +51,7 @@ jobs: - name: Set up dotenvs run: | cp docker/.env.example docker/.env - cp docker/middleware.env.example docker/middleware.env + cp docker/envs/middleware.env.example docker/middleware.env - name: Expose Service Ports run: sh .github/workflows/expose_service_ports.sh diff --git a/.github/workflows/vdb-tests.yml b/.github/workflows/vdb-tests.yml index 38ec96f00f..cdcdcb27d7 100644 --- a/.github/workflows/vdb-tests.yml +++ b/.github/workflows/vdb-tests.yml @@ -48,7 +48,7 @@ jobs: - name: Set up dotenvs run: | cp docker/.env.example docker/.env - cp docker/middleware.env.example docker/middleware.env + cp docker/envs/middleware.env.example docker/middleware.env - name: Expose Service Ports run: sh .github/workflows/expose_service_ports.sh diff --git a/Makefile b/Makefile index d8c9df5208..ae7589bbd6 100644 --- a/Makefile +++ b/Makefile @@ -71,13 +71,13 @@ type-check: @echo "📝 Running type checks (basedpyright + pyrefly + mypy)..." 
@./dev/basedpyright-check $(PATH_TO_CHECK) @./dev/pyrefly-check-local - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --check-untyped-defs --disable-error-code=import-untyped . @echo "✅ Type checks complete" type-check-core: @echo "📝 Running core type checks (basedpyright + mypy)..." @./dev/basedpyright-check $(PATH_TO_CHECK) - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --exclude 'dev/generate_fastopenapi_specs.py' --check-untyped-defs --disable-error-code=import-untyped . @echo "✅ Core type checks complete" test: diff --git a/README.md b/README.md index 778028fc76..b6cbb0e126 100644 --- a/README.md +++ b/README.md @@ -137,7 +137,7 @@ Star Dify on GitHub and be instantly notified of new releases. ### Custom configurations -If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments). +If you need to customize the configuration, edit `docker/.env`. 
The essential startup defaults live in [`docker/.env.example`](docker/.env.example), and optional advanced variables are split under `docker/envs/` by theme. After making any changes, re-run `docker compose up -d` from the `docker` directory. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments). ### Metrics Monitoring with Grafana diff --git a/api/.env.example b/api/.env.example index f6f65011ea..56ba8a6c5d 100644 --- a/api/.env.example +++ b/api/.env.example @@ -98,6 +98,8 @@ DB_DATABASE=dify SQLALCHEMY_POOL_PRE_PING=true SQLALCHEMY_POOL_TIMEOUT=30 +# Connection pool reset behavior on return +SQLALCHEMY_POOL_RESET_ON_RETURN=rollback # Storage configuration # use for store upload files, private keys... @@ -381,7 +383,7 @@ VIKINGDB_ACCESS_KEY=your-ak VIKINGDB_SECRET_KEY=your-sk VIKINGDB_REGION=cn-shanghai VIKINGDB_HOST=api-vikingdb.xxx.volces.com -VIKINGDB_SCHEMA=http +VIKINGDB_SCHEME=http VIKINGDB_CONNECTION_TIMEOUT=30 VIKINGDB_SOCKET_TIMEOUT=30 @@ -432,8 +434,6 @@ UPLOAD_FILE_EXTENSION_BLACKLIST= # Model configuration MULTIMODAL_SEND_FORMAT=base64 -PROMPT_GENERATION_MAX_TOKENS=512 -CODE_GENERATION_MAX_TOKENS=1024 PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false # Mail configuration, support: resend, smtp, sendgrid diff --git a/api/Dockerfile b/api/Dockerfile index c16de9ef67..141d4a6adb 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -17,7 +17,7 @@ FROM base AS packages RUN apt-get update \ && apt-get install -y --no-install-recommends \ # basic environment - g++ \ + git g++ \ # for building gmpy2 libmpfr-dev libmpc-dev diff --git a/api/configs/middleware/__init__.py b/api/configs/middleware/__init__.py index c392b8840f..ee8b93aa9f 100644 --- a/api/configs/middleware/__init__.py +++ b/api/configs/middleware/__init__.py @@ -114,7 +114,7 @@ class SQLAlchemyEngineOptionsDict(TypedDict): pool_pre_ping: bool connect_args: dict[str, str] pool_use_lifo: bool - pool_reset_on_return: None + 
pool_reset_on_return: Literal["commit", "rollback", None] pool_timeout: int @@ -223,6 +223,11 @@ class DatabaseConfig(BaseSettings): default=30, ) + SQLALCHEMY_POOL_RESET_ON_RETURN: Literal["commit", "rollback", None] = Field( + description="Connection pool reset behavior on return. Options: 'commit', 'rollback', or None", + default="rollback", + ) + RETRIEVAL_SERVICE_EXECUTORS: NonNegativeInt = Field( description="Number of processes for the retrieval service, default to CPU cores.", default=os.cpu_count() or 1, @@ -252,7 +257,7 @@ class DatabaseConfig(BaseSettings): "pool_pre_ping": self.SQLALCHEMY_POOL_PRE_PING, "connect_args": connect_args, "pool_use_lifo": self.SQLALCHEMY_POOL_USE_LIFO, - "pool_reset_on_return": None, + "pool_reset_on_return": self.SQLALCHEMY_POOL_RESET_ON_RETURN, "pool_timeout": self.SQLALCHEMY_POOL_TIMEOUT, } return result diff --git a/api/constants/recommended_apps.json b/api/constants/recommended_apps.json index 3779fb0180..3d728f1b2e 100644 --- a/api/constants/recommended_apps.json +++ b/api/constants/recommended_apps.json @@ -19,7 +19,7 @@ "name": "Website Generator" }, "app_id": "b53545b1-79ea-4da3-b31a-c39391c6f041", - "category": "Programming", + "categories": ["Programming"], "copyright": null, "description": null, "is_listed": true, @@ -35,7 +35,7 @@ "name": "Investment Analysis Report Copilot" }, "app_id": "a23b57fa-85da-49c0-a571-3aff375976c1", - "category": "Agent", + "categories": ["Agent"], "copyright": "Dify.AI", "description": "Welcome to your personalized Investment Analysis Copilot service, where we delve into the depths of stock analysis to provide you with comprehensive insights. \n", "is_listed": true, @@ -51,7 +51,7 @@ "name": "Workflow Planning Assistant " }, "app_id": "f3303a7d-a81c-404e-b401-1f8711c998c1", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "An assistant that helps you plan and select the right node for a workflow (V0.6.0). 
", "is_listed": true, @@ -67,7 +67,7 @@ "name": "Automated Email Reply " }, "app_id": "e9d92058-7d20-4904-892f-75d90bef7587", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Reply emails using Gmail API. It will automatically retrieve email in your inbox and create a response in Gmail. \nConfigure your Gmail API in Google Cloud Console. ", "is_listed": true, @@ -83,7 +83,7 @@ "name": "Book Translation " }, "app_id": "98b87f88-bd22-4d86-8b74-86beba5e0ed4", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "A workflow designed to translate a full book up to 15000 tokens per run. Uses Code node to separate text into chunks and Iteration to translate each chunk. ", "is_listed": true, @@ -99,7 +99,7 @@ "name": "Python bug fixer" }, "app_id": "cae337e6-aec5-4c7b-beca-d6f1a808bd5e", - "category": "Programming", + "categories": ["Programming"], "copyright": null, "description": null, "is_listed": true, @@ -115,7 +115,7 @@ "name": "Code Interpreter" }, "app_id": "d077d587-b072-4f2c-b631-69ed1e7cdc0f", - "category": "Programming", + "categories": ["Programming"], "copyright": "Copyright 2023 Dify", "description": "Code interpreter, clarifying the syntax and semantics of the code.", "is_listed": true, @@ -131,7 +131,7 @@ "name": "SVG Logo Design " }, "app_id": "73fbb5f1-c15d-4d74-9cc8-46d9db9b2cca", - "category": "Agent", + "categories": ["Agent"], "copyright": "Dify.AI", "description": "Hello, I am your creative partner in bringing ideas to vivid life! I can assist you in creating stunning designs by leveraging abilities of DALL·E 3. ", "is_listed": true, @@ -147,7 +147,7 @@ "name": "Long Story Generator (Iteration) " }, "app_id": "5efb98d7-176b-419c-b6ef-50767391ab62", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "A workflow demonstrating how to use Iteration node to generate long article that is longer than the context length of LLMs. 
", "is_listed": true, @@ -163,7 +163,7 @@ "name": "Text Summarization Workflow" }, "app_id": "f00c4531-6551-45ee-808f-1d7903099515", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Based on users' choice, retrieve external knowledge to more accurately summarize articles.", "is_listed": true, @@ -179,7 +179,7 @@ "name": "YouTube Channel Data Analysis" }, "app_id": "be591209-2ca8-410f-8f3b-ca0e530dd638", - "category": "Agent", + "categories": ["Agent"], "copyright": "Dify.AI", "description": "I am a YouTube Channel Data Analysis Copilot, I am here to provide expert data analysis tailored to your needs. ", "is_listed": true, @@ -195,7 +195,7 @@ "name": "Article Grading Bot" }, "app_id": "a747f7b4-c48b-40d6-b313-5e628232c05f", - "category": "Writing", + "categories": ["Writing"], "copyright": null, "description": "Assess the quality of articles and text based on user defined criteria. ", "is_listed": true, @@ -211,7 +211,7 @@ "name": "SEO Blog Generator" }, "app_id": "18f3bd03-524d-4d7a-8374-b30dbe7c69d5", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Workflow for retrieving information from the internet, followed by segmented generation of SEO blogs.", "is_listed": true, @@ -227,7 +227,7 @@ "name": "SQL Creator" }, "app_id": "050ef42e-3e0c-40c1-a6b6-a64f2c49d744", - "category": "Programming", + "categories": ["Programming"], "copyright": "Copyright 2023 Dify", "description": "Write SQL from natural language by pasting in your schema with the request.Please describe your query requirements in natural language and select the target database type.", "is_listed": true, @@ -243,7 +243,7 @@ "name": "Sentiment Analysis " }, "app_id": "f06bf86b-d50c-4895-a942-35112dbe4189", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Batch sentiment analysis of text, followed by JSON output of sentiment classification along with scores.", "is_listed": true, @@ 
-259,7 +259,7 @@ "name": "Strategic Consulting Expert" }, "app_id": "7e8ca1ae-02f2-4b5f-979e-62d19133bee2", - "category": "Assistant", + "categories": ["Assistant"], "copyright": "Copyright 2023 Dify", "description": "I can answer your questions related to strategic marketing.", "is_listed": true, @@ -275,7 +275,7 @@ "name": "Code Converter" }, "app_id": "4006c4b2-0735-4f37-8dbb-fb1a8c5bd87a", - "category": "Programming", + "categories": ["Programming"], "copyright": "Copyright 2023 Dify", "description": "This is an application that provides the ability to convert code snippets in multiple programming languages. You can input the code you wish to convert, select the target programming language, and get the desired output.", "is_listed": true, @@ -291,7 +291,7 @@ "name": "Question Classifier + Knowledge + Chatbot " }, "app_id": "d9f6b733-e35d-4a40-9f38-ca7bbfa009f7", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Basic Workflow Template, a chatbot capable of identifying intents alongside with a knowledge base.", "is_listed": true, @@ -307,7 +307,7 @@ "name": "AI Front-end interviewer" }, "app_id": "127efead-8944-4e20-ba9d-12402eb345e0", - "category": "HR", + "categories": ["HR"], "copyright": "Copyright 2023 Dify", "description": "A simulated front-end interviewer that tests the skill level of front-end development through questioning.", "is_listed": true, @@ -323,7 +323,7 @@ "name": "Knowledge Retrieval + Chatbot " }, "app_id": "e9870913-dd01-4710-9f06-15d4180ca1ce", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Basic Workflow Template, A chatbot with a knowledge base. 
", "is_listed": true, @@ -339,7 +339,7 @@ "name": "Email Assistant Workflow " }, "app_id": "dd5b6353-ae9b-4bce-be6a-a681a12cf709", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "A multifunctional email assistant capable of summarizing, replying, composing, proofreading, and checking grammar.", "is_listed": true, @@ -355,7 +355,7 @@ "name": "Customer Review Analysis Workflow " }, "app_id": "9c0cd31f-4b62-4005-adf5-e3888d08654a", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Utilize LLM (Large Language Models) to classify customer reviews and forward them to the internal system.", "is_listed": true, diff --git a/api/controllers/common/human_input.py b/api/controllers/common/human_input.py index 5d6f4efb95..4015650e1f 100644 --- a/api/controllers/common/human_input.py +++ b/api/controllers/common/human_input.py @@ -1,6 +1,36 @@ -from pydantic import BaseModel, JsonValue +from pydantic import BaseModel, Field, JsonValue + +HUMAN_INPUT_FORM_INPUT_EXAMPLE = { + "decision": "approve", + "attachment": { + "transfer_method": "local_file", + "upload_file_id": "4e0d1b87-52f2-49f6-b8c6-95cd9c954b3e", + "type": "document", + }, + "attachments": [ + { + "transfer_method": "local_file", + "upload_file_id": "1a77f0df-c0e6-461c-987c-e72526f341ee", + "type": "document", + }, + { + "transfer_method": "remote_url", + "url": "https://example.com/report.pdf", + "type": "document", + }, + ], +} class HumanInputFormSubmitPayload(BaseModel): - inputs: dict[str, JsonValue] + inputs: dict[str, JsonValue] = Field( + description=( + "Submitted human input values keyed by output variable name. " + "Use a string for paragraph or select input values, a file mapping for file inputs, " + "and a list of file mappings for file-list inputs. Local file mappings use " + "`transfer_method=local_file` with `upload_file_id`; remote file mappings use " + "`transfer_method=remote_url` with `url` or `remote_url`." 
+ ), + examples=[HUMAN_INPUT_FORM_INPUT_EXAMPLE], + ) action: str diff --git a/api/controllers/common/schema.py b/api/controllers/common/schema.py index 8d112c203b..0c5e23c29c 100644 --- a/api/controllers/common/schema.py +++ b/api/controllers/common/schema.py @@ -1,4 +1,10 @@ -"""Helpers for registering Pydantic models with Flask-RESTX namespaces.""" +"""Helpers for registering Pydantic models with Flask-RESTX namespaces. + +Flask-RESTX treats `SchemaModel` bodies as opaque JSON schemas; it does not +promote Pydantic's nested `$defs` into top-level Swagger `definitions`. +These helpers keep that translation centralized so models registered through +`register_schema_models` emit resolvable Swagger 2.0 references. +""" from enum import StrEnum @@ -8,10 +14,32 @@ from pydantic import BaseModel, TypeAdapter DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" -def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: - """Register a single BaseModel with a namespace for Swagger documentation.""" +def _register_json_schema(namespace: Namespace, name: str, schema: dict) -> None: + """Register a JSON schema and promote any nested Pydantic `$defs`.""" - namespace.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) + nested_definitions = schema.get("$defs") + schema_to_register = dict(schema) + if isinstance(nested_definitions, dict): + schema_to_register.pop("$defs") + + namespace.schema_model(name, schema_to_register) + + if not isinstance(nested_definitions, dict): + return + + for nested_name, nested_schema in nested_definitions.items(): + if isinstance(nested_schema, dict): + _register_json_schema(namespace, nested_name, nested_schema) + + +def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: + """Register a BaseModel and its nested schema definitions for Swagger documentation.""" + + _register_json_schema( + namespace, + model.__name__, + 
model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), + ) def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None: @@ -34,8 +62,10 @@ def get_or_create_model(model_name: str, field_def): def register_enum_models(namespace: Namespace, *models: type[StrEnum]) -> None: """Register multiple StrEnum with a namespace.""" for model in models: - namespace.schema_model( - model.__name__, TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + _register_json_schema( + namespace, + model.__name__, + TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), ) diff --git a/api/controllers/console/admin.py b/api/controllers/console/admin.py index dce394be97..a32c3420bb 100644 --- a/api/controllers/console/admin.py +++ b/api/controllers/console/admin.py @@ -12,6 +12,7 @@ from werkzeug.exceptions import BadRequest, NotFound, Unauthorized from configs import dify_config from constants.languages import supported_language +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.wraps import only_edition_cloud from core.db.session_factory import session_factory @@ -301,15 +302,7 @@ class BatchAddNotificationAccountsPayload(BaseModel): user_email: list[str] = Field(..., description="List of account email addresses") -console_ns.schema_model( - UpsertNotificationPayload.__name__, - UpsertNotificationPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) - -console_ns.schema_model( - BatchAddNotificationAccountsPayload.__name__, - BatchAddNotificationAccountsPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_schema_models(console_ns, UpsertNotificationPayload, BatchAddNotificationAccountsPayload) @console_ns.route("/admin/upsert_notification") diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index ae9bea7b70..d03c00c2a0 100644 --- 
a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -1,4 +1,5 @@ import logging +import re import uuid from datetime import datetime from typing import Any, Literal @@ -8,6 +9,7 @@ from flask_restx import Resource from pydantic import AliasChoices, BaseModel, Field, computed_field, field_validator from sqlalchemy import select from sqlalchemy.orm import Session +from werkzeug.datastructures import MultiDict from werkzeug.exceptions import BadRequest from controllers.common.helpers import FileInfo @@ -23,6 +25,7 @@ from controllers.console.wraps import ( is_admin_or_owner_required, setup_required, ) +from core.db.session_factory import session_factory from core.ops.ops_trace_manager import OpsTraceManager from core.rag.entities import PreProcessingRule, Rule, Segmentation from core.rag.retrieval.retrieval_methods import RetrievalMethod @@ -58,6 +61,7 @@ ALLOW_CREATE_APP_MODES = ["chat", "agent-chat", "advanced-chat", "workflow", "co register_enum_models(console_ns, IconType) _logger = logging.getLogger(__name__) +_TAG_IDS_BRACKET_PATTERN = re.compile(r"^tag_ids\[(\d+)\]$") class AppListQuery(BaseModel): @@ -67,22 +71,19 @@ class AppListQuery(BaseModel): default="all", description="App mode filter" ) name: str | None = Field(default=None, description="Filter by app name") - tag_ids: list[str] | None = Field(default=None, description="Comma-separated tag IDs") + tag_ids: list[str] | None = Field(default=None, description="Filter by tag IDs") is_created_by_me: bool | None = Field(default=None, description="Filter by creator") @field_validator("tag_ids", mode="before") @classmethod - def validate_tag_ids(cls, value: str | list[str] | None) -> list[str] | None: + def validate_tag_ids(cls, value: list[str] | None) -> list[str] | None: if not value: return None - if isinstance(value, str): - items = [item.strip() for item in value.split(",") if item.strip()] - elif isinstance(value, list): - items = [str(item).strip() for item in value if item 
and str(item).strip()] - else: - raise TypeError("Unsupported tag_ids type.") + if not isinstance(value, list): + raise ValueError("Unsupported tag_ids type.") + items = [str(item).strip() for item in value if item and str(item).strip()] if not items: return None @@ -92,6 +93,26 @@ class AppListQuery(BaseModel): raise ValueError("Invalid UUID format in tag_ids.") from exc +def _normalize_app_list_query_args(query_args: MultiDict[str, str]) -> dict[str, str | list[str]]: + normalized: dict[str, str | list[str]] = {} + indexed_tag_ids: list[tuple[int, str]] = [] + + for key in query_args: + match = _TAG_IDS_BRACKET_PATTERN.fullmatch(key) + if match: + indexed_tag_ids.extend((int(match.group(1)), value) for value in query_args.getlist(key)) + continue + + value = query_args.get(key) + if value is not None: + normalized[key] = value + + if indexed_tag_ids: + normalized["tag_ids"] = [value for _, value in sorted(indexed_tag_ids)] + + return normalized + + class CreateAppPayload(BaseModel): name: str = Field(..., min_length=1, description="App name") description: str | None = Field(default=None, description="App description (max 400 chars)", max_length=400) @@ -460,7 +481,7 @@ class AppListApi(Resource): """Get app list""" current_user, current_tenant_id = current_account_with_tenant() - args = AppListQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = AppListQuery.model_validate(_normalize_app_list_query_args(request.args)) args_dict = args.model_dump() # get app list @@ -857,7 +878,8 @@ class AppTraceApi(Resource): @account_initialization_required def get(self, app_id): """Get app trace""" - app_trace_config = OpsTraceManager.get_app_tracing_config(app_id=app_id) + with session_factory.create_session() as session: + app_trace_config = OpsTraceManager.get_app_tracing_config(app_id, session) return app_trace_config diff --git a/api/controllers/console/app/app_import.py b/api/controllers/console/app/app_import.py index e91dc9cfe5..b653016319 
100644 --- a/api/controllers/console/app/app_import.py +++ b/api/controllers/console/app/app_import.py @@ -2,7 +2,7 @@ from flask_restx import Resource from pydantic import BaseModel, Field from sqlalchemy.orm import Session -from controllers.common.schema import register_schema_models +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console.app.wraps import get_app_model from controllers.console.wraps import ( account_initialization_required, @@ -33,6 +33,7 @@ class AppImportPayload(BaseModel): app_id: str | None = Field(None) +register_enum_models(console_ns, ImportStatus) register_schema_models(console_ns, AppImportPayload, Import, CheckDependenciesResult) diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py index c720a5e074..d4f501d34c 100644 --- a/api/controllers/console/app/generator.py +++ b/api/controllers/console/app/generator.py @@ -3,6 +3,7 @@ from collections.abc import Sequence from flask_restx import Resource from pydantic import BaseModel, Field +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console import console_ns from controllers.console.app.error import ( CompletionRequestError, @@ -19,13 +20,12 @@ from core.helper.code_executor.python3.python3_code_provider import Python3CodeP from core.llm_generator.entities import RuleCodeGeneratePayload, RuleGeneratePayload, RuleStructuredOutputPayload from core.llm_generator.llm_generator import LLMGenerator from extensions.ext_database import db +from graphon.model_runtime.entities.llm_entities import LLMMode from graphon.model_runtime.errors.invoke import InvokeError from libs.login import current_account_with_tenant, login_required from models import App from services.workflow_service import WorkflowService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class InstructionGeneratePayload(BaseModel): flow_id: str = Field(..., 
description="Workflow/Flow ID") @@ -41,16 +41,16 @@ class InstructionTemplatePayload(BaseModel): type: str = Field(..., description="Instruction template type") -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(RuleGeneratePayload) -reg(RuleCodeGeneratePayload) -reg(RuleStructuredOutputPayload) -reg(InstructionGeneratePayload) -reg(InstructionTemplatePayload) -reg(ModelConfig) +register_enum_models(console_ns, LLMMode) +register_schema_models( + console_ns, + RuleGeneratePayload, + RuleCodeGeneratePayload, + RuleStructuredOutputPayload, + InstructionGeneratePayload, + InstructionTemplatePayload, + ModelConfig, +) @console_ns.route("/rule-generate") diff --git a/api/controllers/console/app/workflow_draft_variable.py b/api/controllers/console/app/workflow_draft_variable.py index e32ba5f66c..c688a69074 100644 --- a/api/controllers/console/app/workflow_draft_variable.py +++ b/api/controllers/console/app/workflow_draft_variable.py @@ -75,14 +75,15 @@ console_ns.schema_model( def _convert_values_to_json_serializable_object(value: Segment): - if isinstance(value, FileSegment): - return value.value.model_dump() - elif isinstance(value, ArrayFileSegment): - return [i.model_dump() for i in value.value] - elif isinstance(value, SegmentGroup): - return [_convert_values_to_json_serializable_object(i) for i in value.value] - else: - return value.value + match value: + case FileSegment(): + return value.value.model_dump() + case ArrayFileSegment(): + return [i.model_dump() for i in value.value] + case SegmentGroup(): + return [_convert_values_to_json_serializable_object(i) for i in value.value] + case _: + return value.value def _serialize_var_value(variable: WorkflowDraftVariable): diff --git a/api/controllers/console/explore/recommended_app.py b/api/controllers/console/explore/recommended_app.py index 55bd679b48..fa65c8daf1 100644 --- 
a/api/controllers/console/explore/recommended_app.py +++ b/api/controllers/console/explore/recommended_app.py @@ -52,7 +52,7 @@ class RecommendedAppResponse(ResponseModel): copyright: str | None = None privacy_policy: str | None = None custom_disclaimer: str | None = None - category: str | None = None + categories: list[str] = Field(default_factory=list) position: int | None = None is_listed: bool | None = None can_trial: bool | None = None diff --git a/api/controllers/console/tag/tags.py b/api/controllers/console/tag/tags.py index f73e2da54e..b9e876c906 100644 --- a/api/controllers/console/tag/tags.py +++ b/api/controllers/console/tag/tags.py @@ -32,12 +32,7 @@ class TagBindingPayload(BaseModel): class TagBindingRemovePayload(BaseModel): - tag_id: str = Field(description="Tag ID to remove") - target_id: str = Field(description="Target ID to unbind tag from") - type: TagType = Field(description="Tag type") - - -class TagBindingItemDeletePayload(BaseModel): + tag_ids: list[str] = Field(description="Tag IDs to remove", min_length=1) target_id: str = Field(description="Target ID to unbind tag from") type: TagType = Field(description="Tag type") @@ -75,7 +70,6 @@ register_schema_models( TagBasePayload, TagBindingPayload, TagBindingRemovePayload, - TagBindingItemDeletePayload, TagListQueryParam, TagResponse, ) @@ -184,13 +178,13 @@ def _create_tag_bindings() -> tuple[dict[str, str], int]: return {"result": "success"}, 200 -def _remove_tag_binding() -> tuple[dict[str, str], int]: +def _remove_tag_bindings() -> tuple[dict[str, str], int]: _require_tag_binding_edit_permission() payload = TagBindingRemovePayload.model_validate(console_ns.payload or {}) TagService.delete_tag_binding( TagBindingDeletePayload( - tag_id=payload.tag_id, + tag_ids=payload.tag_ids, target_id=payload.target_id, type=payload.type, ) @@ -211,54 +205,15 @@ class TagBindingCollectionApi(Resource): return _create_tag_bindings() -@console_ns.route("/tag-bindings/") -class TagBindingItemApi(Resource): - 
"""Canonical item resource for tag binding deletion.""" - - @console_ns.doc("delete_tag_binding") - @console_ns.doc(params={"id": "Tag ID"}) - @console_ns.expect(console_ns.models[TagBindingItemDeletePayload.__name__]) - @setup_required - @login_required - @account_initialization_required - def delete(self, id): - _require_tag_binding_edit_permission() - payload = TagBindingItemDeletePayload.model_validate(console_ns.payload or {}) - TagService.delete_tag_binding( - TagBindingDeletePayload( - tag_id=str(id), - target_id=payload.target_id, - type=payload.type, - ) - ) - return {"result": "success"}, 200 - - -@console_ns.route("/tag-bindings/create") -class DeprecatedTagBindingCreateApi(Resource): - """Deprecated verb-based alias for tag binding creation.""" - - @console_ns.doc("create_tag_binding_deprecated") - @console_ns.doc(deprecated=True) - @console_ns.doc(description="Deprecated legacy alias. Use POST /tag-bindings instead.") - @console_ns.expect(console_ns.models[TagBindingPayload.__name__]) - @setup_required - @login_required - @account_initialization_required - def post(self): - return _create_tag_bindings() - - @console_ns.route("/tag-bindings/remove") -class DeprecatedTagBindingRemoveApi(Resource): - """Deprecated verb-based alias for tag binding deletion.""" +class TagBindingRemoveApi(Resource): + """Batch resource for tag binding deletion.""" - @console_ns.doc("delete_tag_binding_deprecated") - @console_ns.doc(deprecated=True) - @console_ns.doc(description="Deprecated legacy alias. 
Use DELETE /tag-bindings/{id} instead.") + @console_ns.doc("remove_tag_bindings") + @console_ns.doc(description="Remove one or more tag bindings from a target.") @console_ns.expect(console_ns.models[TagBindingRemovePayload.__name__]) @setup_required @login_required @account_initialization_required def post(self): - return _remove_tag_binding() + return _remove_tag_bindings() diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index 34c9534de8..e653c9064c 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -876,10 +876,10 @@ class ToolBuiltinProviderSetDefaultApi(Resource): @login_required @account_initialization_required def post(self, provider): - current_user, current_tenant_id = current_account_with_tenant() + _, current_tenant_id = current_account_with_tenant() payload = BuiltinProviderDefaultCredentialPayload.model_validate(console_ns.payload or {}) return BuiltinToolManageService.set_default_provider( - tenant_id=current_tenant_id, user_id=current_user.id, provider=provider, id=payload.id + tenant_id=current_tenant_id, provider=provider, id=payload.id ) diff --git a/api/controllers/service_api/dataset/dataset.py b/api/controllers/service_api/dataset/dataset.py index 76519cad0a..3eb773fa7c 100644 --- a/api/controllers/service_api/dataset/dataset.py +++ b/api/controllers/service_api/dataset/dataset.py @@ -2,7 +2,7 @@ from typing import Any, Literal, cast from flask import request from flask_restx import marshal -from pydantic import BaseModel, Field, TypeAdapter, field_validator +from pydantic import BaseModel, Field, TypeAdapter, field_validator, model_validator from werkzeug.exceptions import Forbidden, NotFound import services @@ -100,9 +100,27 @@ class TagBindingPayload(BaseModel): class TagUnbindingPayload(BaseModel): - tag_id: str + """Accept the legacy single-tag Service API payload while exposing a normalized 
tag_ids list internally.""" + + tag_ids: list[str] = Field(default_factory=list) + tag_id: str | None = None target_id: str + @model_validator(mode="before") + @classmethod + def normalize_legacy_tag_id(cls, data: object) -> object: + if not isinstance(data, dict): + return data + if not data.get("tag_ids") and data.get("tag_id"): + return {**data, "tag_ids": [data["tag_id"]]} + return data + + @model_validator(mode="after") + def validate_tag_ids(self) -> "TagUnbindingPayload": + if not self.tag_ids: + raise ValueError("Tag IDs is required.") + return self + class DatasetListQuery(BaseModel): page: int = Field(default=1, description="Page number") @@ -601,11 +619,11 @@ class DatasetTagBindingApi(DatasetApiResource): @service_api_ns.route("/datasets/tags/unbinding") class DatasetTagUnbindingApi(DatasetApiResource): @service_api_ns.expect(service_api_ns.models[TagUnbindingPayload.__name__]) - @service_api_ns.doc("unbind_dataset_tag") - @service_api_ns.doc(description="Unbind a tag from a dataset") + @service_api_ns.doc("unbind_dataset_tags") + @service_api_ns.doc(description="Unbind tags from a dataset") @service_api_ns.doc( responses={ - 204: "Tag unbound successfully", + 204: "Tags unbound successfully", 401: "Unauthorized - invalid API token", 403: "Forbidden - insufficient permissions", } @@ -618,7 +636,7 @@ class DatasetTagUnbindingApi(DatasetApiResource): payload = TagUnbindingPayload.model_validate(service_api_ns.payload or {}) TagService.delete_tag_binding( - TagBindingDeletePayload(tag_id=payload.tag_id, target_id=payload.target_id, type=TagType.KNOWLEDGE) + TagBindingDeletePayload(tag_ids=payload.tag_ids, target_id=payload.target_id, type=TagType.KNOWLEDGE) ) return "", 204 diff --git a/api/controllers/web/__init__.py b/api/controllers/web/__init__.py index cfa39e0dfd..d4b0829dea 100644 --- a/api/controllers/web/__init__.py +++ b/api/controllers/web/__init__.py @@ -23,6 +23,7 @@ from . 
import ( feature, files, forgot_password, + human_input_file_upload, human_input_form, login, message, @@ -46,6 +47,7 @@ __all__ = [ "feature", "files", "forgot_password", + "human_input_file_upload", "human_input_form", "login", "message", diff --git a/api/controllers/web/audio.py b/api/controllers/web/audio.py index 3ad595f1f4..8ddbc3abb8 100644 --- a/api/controllers/web/audio.py +++ b/api/controllers/web/audio.py @@ -23,7 +23,7 @@ from controllers.web.wraps import WebApiResource from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError from graphon.model_runtime.errors.invoke import InvokeError from libs.helper import uuid_value -from models.model import App +from models.model import App, EndUser from services.audio_service import AudioService from services.errors.audio import ( AudioTooLargeServiceError, @@ -69,12 +69,12 @@ class AudioApi(WebApiResource): 500: "Internal Server Error", } ) - def post(self, app_model: App, end_user): + def post(self, app_model: App, end_user: EndUser): """Convert audio to text""" file = request.files["file"] try: - response = AudioService.transcript_asr(app_model=app_model, file=file, end_user=end_user) + response = AudioService.transcript_asr(app_model=app_model, file=file, end_user=end_user.external_user_id) return response except services.errors.app_model_config.AppModelConfigBrokenError: @@ -117,7 +117,7 @@ class TextApi(WebApiResource): 500: "Internal Server Error", } ) - def post(self, app_model: App, end_user): + def post(self, app_model: App, end_user: EndUser): """Convert text to audio""" try: payload = TextToAudioPayload.model_validate(web_ns.payload or {}) diff --git a/api/controllers/web/human_input_file_upload.py b/api/controllers/web/human_input_file_upload.py new file mode 100644 index 0000000000..56665781e7 --- /dev/null +++ b/api/controllers/web/human_input_file_upload.py @@ -0,0 +1,181 @@ +import httpx +from flask import request +from flask_restx import Resource 
+from pydantic import BaseModel, Field, HttpUrl + +import services +from controllers.common import helpers +from controllers.common.errors import ( + BlockedFileExtensionError, + FileTooLargeError, + NoFileUploadedError, + RemoteFileUploadError, + TooManyFilesError, + UnsupportedFileTypeError, +) +from controllers.common.schema import register_schema_models +from controllers.web import web_ns +from core.helper import ssrf_proxy +from extensions.ext_database import db +from fields.file_fields import FileResponse, FileWithSignedUrl +from graphon.file import helpers as file_helpers +from libs.exception import BaseHTTPException +from services.file_service import FileService +from services.human_input_file_upload_service import ( + HITL_UPLOAD_TOKEN_PREFIX, + HumanInputFileUploadService, + InvalidUploadTokenError, +) + + +class InvalidUploadTokenBadRequestError(BaseHTTPException): + error_code = "invalid_upload_token" + description = "Invalid upload token." + code = 400 + + +class InvalidUploadTokenUnauthorizedError(BaseHTTPException): + error_code = "invalid_upload_token" + description = "Upload token is required." + code = 401 + + +class InvalidUploadTokenForbiddenError(BaseHTTPException): + error_code = "invalid_upload_token" + description = "Upload token is invalid or expired." 
+ code = 403 + + +class HumanInputRemoteFileUploadPayload(BaseModel): + url: HttpUrl = Field(description="Remote file URL") + + +register_schema_models(web_ns, HumanInputRemoteFileUploadPayload, FileResponse, FileWithSignedUrl) + + +def _extract_hitl_upload_token() -> str: + """Read HITL upload token from Authorization without invoking other bearer auth chains.""" + + authorization = request.headers.get("Authorization") + if authorization is None: + raise InvalidUploadTokenUnauthorizedError() + + parts = authorization.split() + if len(parts) != 2: + raise InvalidUploadTokenUnauthorizedError() + + scheme, token = parts + if scheme.lower() != "bearer": + raise InvalidUploadTokenBadRequestError() + if not token: + raise InvalidUploadTokenUnauthorizedError() + if not token.startswith(HITL_UPLOAD_TOKEN_PREFIX): + raise InvalidUploadTokenBadRequestError() + return token + + +def _validate_context(service: HumanInputFileUploadService, token: str): + try: + return service.validate_upload_token(token) + except InvalidUploadTokenError as exc: + raise InvalidUploadTokenForbiddenError() from exc + + +def _parse_local_upload_file(): + if "file" not in request.files: + raise NoFileUploadedError() + if len(request.files) > 1: + raise TooManyFilesError() + + file = request.files["file"] + if not file.filename: + from controllers.common.errors import FilenameNotExistsError + + raise FilenameNotExistsError() + + return file + + +@web_ns.route("/form/human_input/files/upload") +class HumanInputFileUploadApi(Resource): + def post(self): + """Upload one local file for a HITL human input form.""" + + token = _extract_hitl_upload_token() + upload_service = HumanInputFileUploadService(db.engine) + context = _validate_context(upload_service, token) + file = _parse_local_upload_file() + + try: + upload_file = FileService(db.engine).upload_file( + filename=file.filename or "", + content=file.read(), + mimetype=file.mimetype, + user=context.owner, + source=None, + ) + except 
services.errors.file.FileTooLargeError as file_too_large_error: + raise FileTooLargeError(file_too_large_error.description) + except services.errors.file.UnsupportedFileTypeError: + raise UnsupportedFileTypeError() + except services.errors.file.BlockedFileExtensionError as exc: + raise BlockedFileExtensionError() from exc + + upload_service.record_upload_file(context=context, file_id=upload_file.id) + response = FileResponse.model_validate(upload_file, from_attributes=True) + return response.model_dump(mode="json"), 201 + + +@web_ns.route("/form/human_input/files/remote-upload") +class HumanInputRemoteFileUploadApi(Resource): + def post(self): + """Upload one remote URL file for a HITL human input form.""" + + token = _extract_hitl_upload_token() + upload_service = HumanInputFileUploadService(db.engine) + context = _validate_context(upload_service, token) + payload = HumanInputRemoteFileUploadPayload.model_validate(request.get_json(silent=True) or {}) + url = str(payload.url) + + try: + resp = ssrf_proxy.head(url=url) + if resp.status_code != httpx.codes.OK: + resp = ssrf_proxy.get(url=url, timeout=3, follow_redirects=True) + if resp.status_code != httpx.codes.OK: + raise RemoteFileUploadError(f"Failed to fetch file from {url}: {resp.text}") + except httpx.RequestError as exc: + raise RemoteFileUploadError(f"Failed to fetch file from {url}: {str(exc)}") + + file_info = helpers.guess_file_info_from_response(resp) + if not FileService.is_file_size_within_limit(extension=file_info.extension, file_size=file_info.size): + raise FileTooLargeError() + + content = resp.content if resp.request.method == "GET" else ssrf_proxy.get(url).content + + try: + upload_file = FileService(db.engine).upload_file( + filename=file_info.filename, + content=content, + mimetype=file_info.mimetype, + user=context.owner, + source_url=url, + ) + except services.errors.file.FileTooLargeError as file_too_large_error: + raise FileTooLargeError(file_too_large_error.description) + except 
services.errors.file.UnsupportedFileTypeError: + raise UnsupportedFileTypeError() + except services.errors.file.BlockedFileExtensionError as exc: + raise BlockedFileExtensionError() from exc + + upload_service.record_upload_file(context=context, file_id=upload_file.id) + payload1 = FileWithSignedUrl( + id=upload_file.id, + name=upload_file.name, + size=upload_file.size, + extension=upload_file.extension, + url=file_helpers.get_signed_file_url(upload_file_id=upload_file.id), + mime_type=upload_file.mime_type, + created_by=upload_file.created_by, + created_at=int(upload_file.created_at.timestamp()), + ) + return payload1.model_dump(mode="json"), 201 diff --git a/api/controllers/web/human_input_form.py b/api/controllers/web/human_input_form.py index 1ddf2e0717..51b0e6ac36 100644 --- a/api/controllers/web/human_input_form.py +++ b/api/controllers/web/human_input_form.py @@ -9,11 +9,13 @@ from typing import Any, NotRequired, TypedDict from flask import Response, request from flask_restx import Resource +from pydantic import BaseModel from sqlalchemy import select from werkzeug.exceptions import Forbidden from configs import dify_config from controllers.common.human_input import HumanInputFormSubmitPayload +from controllers.common.schema import register_schema_models from controllers.web import web_ns from controllers.web.error import NotFoundError, WebFormRateLimitExceededError from controllers.web.site import serialize_app_site_payload @@ -21,11 +23,20 @@ from extensions.ext_database import db from libs.helper import RateLimiter, extract_remote_ip from models.account import TenantStatus from models.model import App, Site +from services.human_input_file_upload_service import HumanInputFileUploadService from services.human_input_service import Form, FormNotFoundError, HumanInputService logger = logging.getLogger(__name__) +class HumanInputUploadTokenResponse(BaseModel): + upload_token: str + expires_at: int + + +register_schema_models(web_ns, 
HumanInputUploadTokenResponse) + + _FORM_SUBMIT_RATE_LIMITER = RateLimiter( prefix="web_form_submit_rate_limit", max_attempts=dify_config.WEB_FORM_SUBMIT_RATE_LIMIT_MAX_ATTEMPTS, @@ -36,6 +47,11 @@ _FORM_ACCESS_RATE_LIMITER = RateLimiter( max_attempts=dify_config.WEB_FORM_SUBMIT_RATE_LIMIT_MAX_ATTEMPTS, time_window=dify_config.WEB_FORM_SUBMIT_RATE_LIMIT_WINDOW_SECONDS, ) +_FORM_UPLOAD_TOKEN_RATE_LIMITER = RateLimiter( + prefix="web_form_upload_token_rate_limit", + max_attempts=dify_config.WEB_FORM_SUBMIT_RATE_LIMIT_MAX_ATTEMPTS, + time_window=dify_config.WEB_FORM_SUBMIT_RATE_LIMIT_WINDOW_SECONDS, +) def _stringify_default_values(values: dict[str, object]) -> dict[str, str]: @@ -78,6 +94,33 @@ def _jsonify_form_definition(form: Form, site_payload: dict | None = None) -> Re return Response(json.dumps(payload, ensure_ascii=False), mimetype="application/json") +@web_ns.route("/form/human_input/<string:form_token>/upload-token") +class HumanInputFormUploadTokenApi(Resource): + """API for issuing HITL upload tokens for active human input forms.""" + + def post(self, form_token: str): + """ + Issue an upload token for a human input form.
+ + POST /api/form/human_input/<form_token>/upload-token + """ + ip_address = extract_remote_ip(request) + if _FORM_UPLOAD_TOKEN_RATE_LIMITER.is_rate_limited(ip_address): + raise WebFormRateLimitExceededError() + _FORM_UPLOAD_TOKEN_RATE_LIMITER.increment_rate_limit(ip_address) + + try: + token = HumanInputFileUploadService(db.engine).issue_upload_token(form_token) + except FormNotFoundError: + raise NotFoundError("Form not found") + + response = HumanInputUploadTokenResponse( + upload_token=token.upload_token, + expires_at=_to_timestamp(token.expires_at), + ) + return response.model_dump(mode="json"), 200 + + @web_ns.route("/form/human_input/<string:form_token>") class HumanInputFormApi(Resource): """API for getting and submitting human input forms via the web app.""" diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index 7bab3f7bff..d5b23443d1 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -408,17 +408,19 @@ class WorkflowResponseConverter: self, *, event: QueueHumanInputFormFilledEvent, task_id: str ) -> HumanInputFormFilledResponse: run_id = self._ensure_workflow_run_id() - return HumanInputFormFilledResponse( - task_id=task_id, - workflow_run_id=run_id, - data=HumanInputFormFilledResponse.Data( - node_id=event.node_id, - node_title=event.node_title, - rendered_content=event.rendered_content, - action_id=event.action_id, - action_text=event.action_text, - ), + data = HumanInputFormFilledResponse.Data( + node_id=event.node_id, + node_title=event.node_title, + rendered_content=event.rendered_content, + action_id=event.action_id, + action_text=event.action_text, ) + if event.submitted_data is not None: + runtime_type_converter = WorkflowRuntimeTypeConverter() + + data.submitted_data = runtime_type_converter.value_to_json_encodable_recursive(event.submitted_data) + + return HumanInputFormFilledResponse(task_id=task_id,
workflow_run_id=run_id, data=data) def human_input_form_timeout_to_stream_response( self, *, event: QueueHumanInputFormTimeoutEvent, task_id: str @@ -842,24 +844,24 @@ class WorkflowResponseConverter: return [] files: list[Mapping[str, Any]] = [] - if isinstance(value, FileSegment): - files.append(value.value.to_dict()) - elif isinstance(value, ArrayFileSegment): - files.extend([i.to_dict() for i in value.value]) - elif isinstance(value, File): - files.append(value.to_dict()) - elif isinstance(value, list): - for item in value: - file = cls._get_file_var_from_value(item) + match value: + case FileSegment(): + files.append(value.value.to_dict()) + case ArrayFileSegment(): + files.extend([i.to_dict() for i in value.value]) + case File(): + files.append(value.to_dict()) + case list(): + for item in value: + file = cls._get_file_var_from_value(item) + if file: + files.append(file) + case dict(): + file = cls._get_file_var_from_value(value) if file: files.append(file) - elif isinstance( - value, - dict, - ): - file = cls._get_file_var_from_value(value) - if file: - files.append(file) + case _: + pass return files diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index 047b54c86c..03707a6477 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -432,6 +432,7 @@ class WorkflowBasedAppRunner: rendered_content=event.rendered_content, action_id=event.action_id, action_text=event.action_text, + submitted_data=event.submitted_data, ) ) elif isinstance(event, NodeRunHumanInputFormTimeoutEvent): diff --git a/api/core/app/entities/queue_entities.py b/api/core/app/entities/queue_entities.py index 221b7fb058..a0e7881ede 100644 --- a/api/core/app/entities/queue_entities.py +++ b/api/core/app/entities/queue_entities.py @@ -11,6 +11,7 @@ from graphon.entities import WorkflowStartReason from graphon.entities.pause_reason import PauseReason from graphon.enums import NodeType, 
WorkflowNodeExecutionMetadataKey from graphon.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk +from graphon.variables.segments import Segment class QueueEvent(StrEnum): @@ -508,6 +509,10 @@ class QueueHumanInputFormFilledEvent(AppQueueEvent): action_id: str action_text: str + # Keep the field name aligned with Graphon so the app-layer bridge does not + # need to translate between two equivalent payload names. + submitted_data: Mapping[str, Segment] | None = None + class QueueHumanInputFormTimeoutEvent(AppQueueEvent): """ diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py index ad05566521..defec9f946 100644 --- a/api/core/app/entities/task_entities.py +++ b/api/core/app/entities/task_entities.py @@ -10,7 +10,7 @@ from graphon.entities import WorkflowStartReason from graphon.entities.pause_reason import PauseReasonType from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from graphon.model_runtime.entities.llm_entities import LLMResult, LLMUsage -from graphon.nodes.human_input.entities import FormInput, UserAction +from graphon.nodes.human_input.entities import FormInputConfig, UserActionConfig class AnnotationReplyAccount(BaseModel): @@ -284,8 +284,8 @@ class HumanInputRequiredResponse(StreamResponse): node_id: str node_title: str form_content: str - inputs: Sequence[FormInput] = Field(default_factory=list) - actions: Sequence[UserAction] = Field(default_factory=list) + inputs: Sequence[FormInputConfig] = Field(default_factory=list) + actions: Sequence[UserActionConfig] = Field(default_factory=list) display_in_ui: bool = False form_token: str | None = None resolved_default_values: Mapping[str, Any] = Field(default_factory=dict) @@ -307,8 +307,8 @@ class HumanInputRequiredPauseReasonPayload(BaseModel): node_id: str node_title: str form_content: str - inputs: Sequence[FormInput] = Field(default_factory=list) - actions: Sequence[UserAction] = 
Field(default_factory=list) + inputs: Sequence[FormInputConfig] = Field(default_factory=list) + actions: Sequence[UserActionConfig] = Field(default_factory=list) display_in_ui: bool = False form_token: str | None = None resolved_default_values: Mapping[str, Any] = Field(default_factory=dict) @@ -342,6 +342,8 @@ class HumanInputFormFilledResponse(StreamResponse): action_id: str action_text: str + submitted_data: Mapping[str, Any] | None = None + event: StreamEvent = StreamEvent.HUMAN_INPUT_FORM_FILLED workflow_run_id: str data: Data diff --git a/api/core/entities/execution_extra_content.py b/api/core/entities/execution_extra_content.py index 04ae193396..43252ccb2c 100644 --- a/api/core/entities/execution_extra_content.py +++ b/api/core/entities/execution_extra_content.py @@ -3,9 +3,9 @@ from __future__ import annotations from collections.abc import Mapping, Sequence from typing import Any, TypeAlias -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict, Field, JsonValue -from graphon.nodes.human_input.entities import FormInput, UserAction +from graphon.nodes.human_input.entities import FormInputConfig, UserActionConfig from models.execution_extra_content import ExecutionContentType @@ -16,9 +16,11 @@ class HumanInputFormDefinition(BaseModel): node_id: str node_title: str form_content: str - inputs: Sequence[FormInput] = Field(default_factory=list) - actions: Sequence[UserAction] = Field(default_factory=list) + inputs: Sequence[FormInputConfig] = Field(default_factory=list) + actions: Sequence[UserActionConfig] = Field(default_factory=list) display_in_ui: bool = False + + # `form_token` is `None` if the corresponding form has been submitted. 
form_token: str | None = None resolved_default_values: Mapping[str, Any] = Field(default_factory=dict) expiration_time: int @@ -29,16 +31,31 @@ class HumanInputFormSubmissionData(BaseModel): node_id: str node_title: str + + # Deprecated: rendered_content is kept only for historical reasons. rendered_content: str + + # The identifier of the action the user has chosen. action_id: str + # The button text of the action the user has chosen. action_text: str + # submitted_data records the submitted form data. + # Keys correspond to `output_variable_name` of HumanInput inputs. + # Values are serialized JSON forms of runtime values, including file dictionaries. + # + # For forms submitted before this field was introduced, it is populated from + # the stored submission data. + submitted_data: Mapping[str, JsonValue] | None = None + class HumanInputContent(BaseModel): model_config = ConfigDict(frozen=True) workflow_run_id: str submitted: bool + # Both the form_definition and the form_submission_data are present in + # HumanInputContent. 
For historical records, the form_definition: HumanInputFormDefinition | None = None form_submission_data: HumanInputFormSubmissionData | None = None type: ExecutionContentType = Field(default=ExecutionContentType.HUMAN_INPUT) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index e7ba6e502b..bae0016744 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -569,13 +569,13 @@ class OpsTraceManager: db.session.commit() @classmethod - def get_app_tracing_config(cls, app_id: str): + def get_app_tracing_config(cls, app_id: str, session: Session): """ Get app tracing config :param app_id: app id :return: """ - app: App | None = db.session.get(App, app_id) + app: App | None = session.get(App, app_id) if not app: raise ValueError("App not found") if not app.tracing: diff --git a/api/core/prompt/utils/prompt_message_util.py b/api/core/prompt/utils/prompt_message_util.py index ba76eb0c4e..11414832e3 100644 --- a/api/core/prompt/utils/prompt_message_util.py +++ b/api/core/prompt/utils/prompt_message_util.py @@ -53,24 +53,27 @@ class PromptMessageUtil: files = [] if isinstance(prompt_message.content, list): for content in prompt_message.content: - if isinstance(content, TextPromptMessageContent): - text += content.data - elif isinstance(content, ImagePromptMessageContent): - files.append( - { - "type": "image", - "data": content.data[:10] + "...[TRUNCATED]..." + content.data[-10:], - "detail": content.detail.value, - } - ) - elif isinstance(content, AudioPromptMessageContent): - files.append( - { - "type": "audio", - "data": content.data[:10] + "...[TRUNCATED]..." + content.data[-10:], - "format": content.format, - } - ) + match content: + case TextPromptMessageContent(): + text += content.data + case ImagePromptMessageContent(): + files.append( + { + "type": "image", + "data": content.data[:10] + "...[TRUNCATED]..." 
+ content.data[-10:], + "detail": content.detail.value, + } + ) + case AudioPromptMessageContent(): + files.append( + { + "type": "audio", + "data": content.data[:10] + "...[TRUNCATED]..." + content.data[-10:], + "format": content.format, + } + ) + case _: + continue else: text = cast(str, prompt_message.content) diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index 8969825be4..b290ae456e 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -9,9 +9,9 @@ from typing import TYPE_CHECKING, Any from pydantic import TypeAdapter from sqlalchemy import select from sqlalchemy.exc import IntegrityError -from sqlalchemy.orm import Session from configs import dify_config +from core.db.session_factory import session_factory from core.entities.model_entities import DefaultModelEntity, DefaultModelProviderEntity from core.entities.provider_configuration import ProviderConfiguration, ProviderConfigurations, ProviderModelBundle from core.entities.provider_entities import ( @@ -445,7 +445,7 @@ class ProviderManager: @staticmethod def _get_all_providers(tenant_id: str) -> dict[str, list[Provider]]: provider_name_to_provider_records_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(Provider).where(Provider.tenant_id == tenant_id, Provider.is_valid == True) providers = session.scalars(stmt) for provider in providers: @@ -462,7 +462,7 @@ class ProviderManager: :return: """ provider_name_to_provider_model_records_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(ProviderModel).where(ProviderModel.tenant_id == tenant_id, ProviderModel.is_valid == True) provider_models = session.scalars(stmt) for provider_model in provider_models: @@ -478,7 +478,7 @@ class ProviderManager: :return: """ 
provider_name_to_preferred_provider_type_records_dict = {} - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(TenantPreferredModelProvider).where(TenantPreferredModelProvider.tenant_id == tenant_id) preferred_provider_types = session.scalars(stmt) provider_name_to_preferred_provider_type_records_dict = { @@ -496,7 +496,7 @@ class ProviderManager: :return: """ provider_name_to_provider_model_settings_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(ProviderModelSetting).where(ProviderModelSetting.tenant_id == tenant_id) provider_model_settings = session.scalars(stmt) for provider_model_setting in provider_model_settings: @@ -514,7 +514,7 @@ class ProviderManager: :return: """ provider_name_to_provider_model_credentials_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(ProviderModelCredential).where(ProviderModelCredential.tenant_id == tenant_id) provider_model_credentials = session.scalars(stmt) for provider_model_credential in provider_model_credentials: @@ -544,7 +544,7 @@ class ProviderManager: return {} provider_name_to_provider_load_balancing_model_configs_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(LoadBalancingModelConfig).where(LoadBalancingModelConfig.tenant_id == tenant_id) provider_load_balancing_configs = session.scalars(stmt) for provider_load_balancing_config in provider_load_balancing_configs: @@ -578,7 +578,7 @@ class ProviderManager: :param provider_name: provider name :return: """ - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = ( select(ProviderCredential) .where( @@ -608,7 +608,7 @@ class 
ProviderManager: :param model_type: model type :return: """ - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = ( select(ProviderModelCredential) .where( diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index 87cf6d7085..0a7811bb53 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -1078,6 +1078,13 @@ class ToolManager: if parameter.form == ToolParameter.ToolParameterForm.FORM: if variable_pool: config = tool_configurations.get(parameter.name, {}) + + selector_value = cls._extract_runtime_selector_value(parameter, config) + if selector_value is not None: + # Selector parameters carry structured dictionaries, not scalar ToolInput values. + runtime_parameters[parameter.name] = selector_value + continue + if not (config and isinstance(config, dict) and config.get("value") is not None): continue tool_input = ToolNodeData.ToolInput.model_validate(tool_configurations.get(parameter.name, {})) @@ -1105,5 +1112,39 @@ class ToolManager: runtime_parameters[parameter.name] = value return runtime_parameters + @classmethod + def _extract_runtime_selector_value(cls, parameter: ToolParameter, config: Any) -> dict[str, Any] | None: + if parameter.type not in { + ToolParameter.ToolParameterType.MODEL_SELECTOR, + ToolParameter.ToolParameterType.APP_SELECTOR, + }: + return None + if not isinstance(config, dict): + return None + + input_value = config.get("value") + if isinstance(input_value, dict) and cls._is_selector_value(parameter, input_value): + return cast("dict[str, Any]", parameter.init_frontend_parameter(input_value)) + + if cls._is_selector_value(parameter, config): + selector_value = dict(config) + selector_value.pop("type", None) + selector_value.pop("value", None) + return cast("dict[str, Any]", parameter.init_frontend_parameter(selector_value)) + + return None + + @classmethod + def _is_selector_value(cls, parameter: ToolParameter, value: 
Mapping[str, Any]) -> bool: + if parameter.type == ToolParameter.ToolParameterType.MODEL_SELECTOR: + return ( + isinstance(value.get("provider"), str) + and isinstance(value.get("model"), str) + and isinstance(value.get("model_type"), str) + ) + if parameter.type == ToolParameter.ToolParameterType.APP_SELECTOR: + return isinstance(value.get("app_id"), str) + return False + ToolManager.load_hardcoded_providers_cache() diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py index 5679466cbc..4c6e647335 100644 --- a/api/core/tools/utils/message_transformer.py +++ b/api/core/tools/utils/message_transformer.py @@ -23,36 +23,37 @@ _TOOL_FILE_URL_PATTERN = re.compile(r"(?:^|/+)files/tools/(?P[^/?# def safe_json_value(v): - if isinstance(v, datetime): - tz_name = "UTC" - if isinstance(current_user, Account) and current_user.timezone is not None: - tz_name = current_user.timezone - return v.astimezone(pytz.timezone(tz_name)).isoformat() - elif isinstance(v, date): - return v.isoformat() - elif isinstance(v, UUID): - return str(v) - elif isinstance(v, Decimal): - return float(v) - elif isinstance(v, bytes): - try: - return v.decode("utf-8") - except UnicodeDecodeError: - return v.hex() - elif isinstance(v, memoryview): - return v.tobytes().hex() - elif isinstance(v, np.integer): - return int(v) - elif isinstance(v, np.floating): - return float(v) - elif isinstance(v, np.ndarray): - return v.tolist() - elif isinstance(v, dict): - return safe_json_dict(v) - elif isinstance(v, list | tuple | set): - return [safe_json_value(i) for i in v] - else: - return v + match v: + case datetime(): + tz_name = "UTC" + if isinstance(current_user, Account) and current_user.timezone is not None: + tz_name = current_user.timezone + return v.astimezone(pytz.timezone(tz_name)).isoformat() + case date(): + return v.isoformat() + case UUID(): + return str(v) + case Decimal(): + return float(v) + case bytes(): + try: + return v.decode("utf-8") + except 
UnicodeDecodeError: + return v.hex() + case memoryview(): + return v.tobytes().hex() + case np.integer(): + return int(v) + case np.floating(): + return float(v) + case np.ndarray(): + return v.tolist() + case dict(): + return safe_json_dict(v) + case list() | tuple() | set(): + return [safe_json_value(i) for i in v] + case _: + return v def safe_json_dict(d: dict[str, Any]): diff --git a/api/core/workflow/human_input_adapter.py b/api/core/workflow/human_input_adapter.py index 4b765e6aea..731ae2b858 100644 --- a/api/core/workflow/human_input_adapter.py +++ b/api/core/workflow/human_input_adapter.py @@ -272,6 +272,14 @@ def _adapt_tool_node_data_for_graph(node_data: Mapping[str, Any]) -> dict[str, A normalized_tool_configurations[name] = value continue + selector_value = _extract_selector_configuration(value) + if selector_value is not None: + # Model/app selectors are dictionaries even when they come through the legacy tool configuration path. + # Move them to tool_parameters so graph validation does not flatten them as primitive constants. 
+ found_legacy_tool_inputs = True + normalized_tool_parameters.setdefault(name, {"type": "constant", "value": selector_value}) + continue + input_type = value.get("type") input_value = value.get("value") if input_type not in {"mixed", "variable", "constant"}: @@ -310,6 +318,28 @@ def _flatten_legacy_tool_configuration_value(*, input_type: Any, input_value: An return None +def _extract_selector_configuration(value: Mapping[str, Any]) -> dict[str, Any] | None: + input_value = value.get("value") + if isinstance(input_value, Mapping) and _is_selector_configuration(input_value): + return dict(input_value) + + if _is_selector_configuration(value): + selector_value = dict(value) + selector_value.pop("type", None) + selector_value.pop("value", None) + return selector_value + + return None + + +def _is_selector_configuration(value: Mapping[str, Any]) -> bool: + return ( + isinstance(value.get("provider"), str) + and isinstance(value.get("model"), str) + and isinstance(value.get("model_type"), str) + ) or isinstance(value.get("app_id"), str) + + def _normalize_email_recipients(recipients: Mapping[str, Any]) -> dict[str, Any]: normalized = dict(recipients) diff --git a/api/core/workflow/node_runtime.py b/api/core/workflow/node_runtime.py index b8725853c4..08125ff733 100644 --- a/api/core/workflow/node_runtime.py +++ b/api/core/workflow/node_runtime.py @@ -42,7 +42,12 @@ from graphon.model_runtime.entities.llm_entities import ( from graphon.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool from graphon.model_runtime.entities.model_entities import AIModelEntity from graphon.model_runtime.model_providers.base.large_language_model import LargeLanguageModel -from graphon.nodes.human_input.entities import HumanInputNodeData +from graphon.nodes.human_input.entities import ( + FileInputConfig, + FileListInputConfig, + FormInputConfig, + HumanInputNodeData, +) from graphon.nodes.llm.runtime_protocols import ( PreparedLLMProtocol, 
PromptMessageSerializerProtocol, @@ -78,7 +83,6 @@ from .system_variables import SystemVariableKey, get_system_text if TYPE_CHECKING: from core.tools.__base.tool import Tool from core.tools.entities.tool_entities import ToolInvokeMessage as CoreToolInvokeMessage - from graphon.file import File from graphon.nodes.llm.file_saver import LLMFileSaver from graphon.nodes.tool.entities import ToolNodeData @@ -501,11 +505,15 @@ class DifyToolNodeRuntime(ToolNodeRuntimeProtocol): @staticmethod def _build_tool_runtime_spec(node_data: ToolNodeData) -> _WorkflowToolRuntimeSpec: + tool_configurations = dict(node_data.tool_configurations) + tool_configurations.update( + {name: tool_input.model_dump(mode="python") for name, tool_input in node_data.tool_parameters.items()} + ) return _WorkflowToolRuntimeSpec( provider_type=CoreToolProviderType(node_data.provider_type.value), provider_id=node_data.provider_id, tool_name=node_data.tool_name, - tool_configurations=dict(node_data.tool_configurations), + tool_configurations=tool_configurations, credential_id=node_data.credential_id, ) @@ -625,6 +633,7 @@ class DifyHumanInputNodeRuntime(HumanInputNodeRuntimeProtocol): self._run_context = resolve_dify_run_context(run_context) self._workflow_execution_id_getter = workflow_execution_id_getter self._form_repository = form_repository + self._file_reference_factory = DifyFileReferenceFactory(self._run_context) def _invoke_source(self) -> str: invoke_from = self._run_context.invoke_from @@ -678,6 +687,23 @@ class DifyHumanInputNodeRuntime(HumanInputNodeRuntimeProtocol): repo = self.build_form_repository() return repo.get_form(node_id) + def restore_submitted_data( + self, + *, + node_data: HumanInputNodeData, + submitted_data: Mapping[str, Any], + ) -> Mapping[str, Any]: + restored_data: dict[str, Any] = dict(submitted_data) + for input_config in node_data.inputs: + output_variable_name = input_config.output_variable_name + if output_variable_name not in submitted_data: + continue + 
restored_data[output_variable_name] = self._restore_submitted_value( + input_config=input_config, + value=submitted_data[output_variable_name], + ) + return restored_data + def create_form( self, *, @@ -698,6 +724,55 @@ class DifyHumanInputNodeRuntime(HumanInputNodeRuntimeProtocol): ) return repo.create_form(params) + def _restore_submitted_value( + self, + *, + input_config: FormInputConfig, + value: Any, + ) -> Any: + if isinstance(input_config, FileInputConfig): + return self._restore_submitted_file_value( + output_variable_name=input_config.output_variable_name, + value=value, + ) + if isinstance(input_config, FileListInputConfig): + return self._restore_submitted_file_list_value( + output_variable_name=input_config.output_variable_name, + value=value, + ) + return value + + def _restore_submitted_file_value( + self, + *, + output_variable_name: str, + value: Any, + ) -> Any: + if not isinstance(value, Mapping): + msg = ( + "HumanInput file submission must be persisted as a mapping, " + f"output_variable_name={output_variable_name}" + ) + raise ValueError(msg) + return self._file_reference_factory.build_from_mapping(mapping=value) + + def _restore_submitted_file_list_value( + self, + *, + output_variable_name: str, + value: Any, + ) -> list[Any]: + if not isinstance(value, list): + msg = ( + "HumanInput file-list submission must be persisted as a list, " + f"output_variable_name={output_variable_name}" + ) + raise ValueError(msg) + if any(not isinstance(item, Mapping) for item in value): + msg = f"HumanInput file-list submission must contain mappings, output_variable_name={output_variable_name}" + raise ValueError(msg) + return [self._file_reference_factory.build_from_mapping(mapping=item) for item in value] + def build_dify_llm_file_saver( *, diff --git a/api/dev/generate_fastopenapi_specs.py b/api/dev/generate_fastopenapi_specs.py new file mode 100644 index 0000000000..5a94d32b93 --- /dev/null +++ b/api/dev/generate_fastopenapi_specs.py @@ -0,0 +1,95 @@ 
+"""Generate FastOpenAPI OpenAPI 3.0 specs without booting the full backend.""" + +from __future__ import annotations + +import argparse +import json +import logging +import sys +from dataclasses import dataclass +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_swagger_specs import apply_runtime_defaults, drop_null_values, sort_openapi_arrays + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class FastOpenApiSpecTarget: + route: str + filename: str + + +FASTOPENAPI_SPEC_TARGETS: tuple[FastOpenApiSpecTarget, ...] = ( + FastOpenApiSpecTarget(route="/fastopenapi/openapi.json", filename="fastopenapi-console-openapi.json"), +) + + +def create_fastopenapi_spec_app(): + """Build a minimal Flask app that only mounts FastOpenAPI docs routes.""" + + apply_runtime_defaults() + + from app_factory import create_flask_app_with_configs + from extensions import ext_fastopenapi + + app = create_flask_app_with_configs() + ext_fastopenapi.init_app(app) + return app + + +def generate_fastopenapi_specs(output_dir: Path) -> list[Path]: + """Write FastOpenAPI specs to `output_dir` and return the written paths.""" + + output_dir.mkdir(parents=True, exist_ok=True) + + app = create_fastopenapi_spec_app() + client = app.test_client() + + written_paths: list[Path] = [] + for target in FASTOPENAPI_SPEC_TARGETS: + response = client.get(target.route) + if response.status_code != 200: + raise RuntimeError(f"failed to fetch {target.route}: {response.status_code}") + + payload = response.get_json() + if not isinstance(payload, dict): + raise RuntimeError(f"unexpected response payload for {target.route}") + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) + + output_path = output_dir / target.filename + output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") + written_paths.append(output_path) + + 
return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "-o", + "--output-dir", + type=Path, + default=Path("openapi"), + help="Directory where the OpenAPI JSON files will be written.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_fastopenapi_specs(args.output_dir) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/api/dev/generate_swagger_markdown_docs.py b/api/dev/generate_swagger_markdown_docs.py new file mode 100644 index 0000000000..0900d08331 --- /dev/null +++ b/api/dev/generate_swagger_markdown_docs.py @@ -0,0 +1,161 @@ +"""Generate OpenAPI JSON specs and split Markdown API docs. + +The Markdown step uses `swagger-markdown`, the same converter family as the +Swagger Markdown UI, so CI and local regeneration catch converter-incompatible +OpenAPI output early. 
+""" + +from __future__ import annotations + +import argparse +import logging +import subprocess +import sys +import tempfile +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_fastopenapi_specs import FASTOPENAPI_SPEC_TARGETS, generate_fastopenapi_specs +from dev.generate_swagger_specs import SPEC_TARGETS, generate_specs + +logger = logging.getLogger(__name__) + +SWAGGER_MARKDOWN_PACKAGE = "swagger-markdown@3.0.0" +CONSOLE_SWAGGER_FILENAME = "console-swagger.json" +STALE_COMBINED_MARKDOWN_FILENAME = "api-reference.md" + + +def _convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + subprocess.run( + [ + "npx", + "--yes", + SWAGGER_MARKDOWN_PACKAGE, + "-i", + str(spec_path), + "-o", + str(markdown_path), + ], + check=True, + ) + + +def _demote_markdown_headings(markdown: str, *, levels: int = 1) -> str: + """Nest generated Markdown under another Markdown section.""" + + heading_prefix = "#" * levels + lines = [] + for line in markdown.splitlines(): + if line.startswith("#"): + lines.append(f"{heading_prefix}{line}") + else: + lines.append(line) + return "\n".join(lines).strip() + + +def _append_fastopenapi_markdown(console_markdown_path: Path, fastopenapi_markdown_path: Path) -> None: + """Append FastOpenAPI console docs to the existing console API Markdown.""" + + console_markdown = console_markdown_path.read_text(encoding="utf-8").rstrip() + fastopenapi_markdown = _demote_markdown_headings( + fastopenapi_markdown_path.read_text(encoding="utf-8"), + levels=2, + ) + console_markdown_path.write_text( + "\n\n".join( + [ + console_markdown, + "## FastOpenAPI Preview (OpenAPI 3.0)", + fastopenapi_markdown, + ] + ) + + "\n", + encoding="utf-8", + ) + + +def generate_markdown_docs( + swagger_dir: Path, + markdown_dir: Path, + *, + keep_swagger_json: bool = False, +) -> list[Path]: + """Generate intermediate specs, convert them to split 
Markdown API docs, and return Markdown paths.""" + + swagger_paths = generate_specs(swagger_dir) + fastopenapi_paths = generate_fastopenapi_specs(swagger_dir) + spec_paths = [*swagger_paths, *fastopenapi_paths] + swagger_paths_by_name = {path.name: path for path in swagger_paths} + fastopenapi_paths_by_name = {path.name: path for path in fastopenapi_paths} + + markdown_dir.mkdir(parents=True, exist_ok=True) + + written_paths: list[Path] = [] + try: + with tempfile.TemporaryDirectory(prefix="dify-api-docs-") as temp_dir: + temp_markdown_dir = Path(temp_dir) + + for target in SPEC_TARGETS: + swagger_path = swagger_paths_by_name[target.filename] + markdown_path = markdown_dir / f"{swagger_path.stem}.md" + _convert_spec_to_markdown(swagger_path, markdown_path) + written_paths.append(markdown_path) + + for target in FASTOPENAPI_SPEC_TARGETS: # type: ignore + fastopenapi_path = fastopenapi_paths_by_name[target.filename] + markdown_path = temp_markdown_dir / f"{fastopenapi_path.stem}.md" + _convert_spec_to_markdown(fastopenapi_path, markdown_path) + + console_markdown_path = markdown_dir / f"{Path(CONSOLE_SWAGGER_FILENAME).stem}.md" + _append_fastopenapi_markdown(console_markdown_path, markdown_path) + + (markdown_dir / STALE_COMBINED_MARKDOWN_FILENAME).unlink(missing_ok=True) + finally: + if not keep_swagger_json: + for path in spec_paths: + path.unlink(missing_ok=True) + + return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--swagger-dir", + type=Path, + default=Path("openapi"), + help="Directory where intermediate JSON spec files will be written.", + ) + parser.add_argument( + "--markdown-dir", + type=Path, + default=Path("openapi/markdown"), + help="Directory where split Markdown API docs will be written.", + ) + parser.add_argument( + "--keep-swagger-json", + action="store_true", + help="Keep intermediate JSON spec files after Markdown generation.", + ) + return 
parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_markdown_docs( + args.swagger_dir, + args.markdown_dir, + keep_swagger_json=args.keep_swagger_json, + ) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/api/dev/generate_swagger_specs.py b/api/dev/generate_swagger_specs.py index 7e9688bfb4..9122f3ab24 100644 --- a/api/dev/generate_swagger_specs.py +++ b/api/dev/generate_swagger_specs.py @@ -9,12 +9,15 @@ which is unnecessary when the goal is only to serialize the Flask-RESTX from __future__ import annotations import argparse +import hashlib import json import logging import os import sys +from collections.abc import MutableMapping from dataclasses import dataclass from pathlib import Path +from typing import Protocol, TypeGuard from flask import Flask from flask_restx.swagger import Swagger @@ -30,19 +33,110 @@ if str(API_ROOT) not in sys.path: class SpecTarget: route: str filename: str + namespace: str + + +class RestxApi(Protocol): + models: MutableMapping[str, object] + + def model(self, name: str, model: dict[object, object]) -> object: ... SPEC_TARGETS: tuple[SpecTarget, ...] 
= ( - SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json"), - SpecTarget(route="/api/swagger.json", filename="web-swagger.json"), - SpecTarget(route="/v1/swagger.json", filename="service-swagger.json"), + SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json", namespace="console"), + SpecTarget(route="/api/swagger.json", filename="web-swagger.json", namespace="web"), + SpecTarget(route="/v1/swagger.json", filename="service-swagger.json", namespace="service"), ) _ORIGINAL_REGISTER_MODEL = Swagger.register_model _ORIGINAL_REGISTER_FIELD = Swagger.register_field -def _apply_runtime_defaults() -> None: +def _is_inline_field_map(value: object) -> TypeGuard[dict[object, object]]: + """Return whether a nested field map is an anonymous inline mapping.""" + + from flask_restx.model import Model, OrderedModel + + return isinstance(value, dict) and not isinstance(value, (Model, OrderedModel)) + + +def _jsonable_schema_value(value: object) -> object: + """Return a deterministic JSON-serializable representation for schema fingerprints.""" + + if value is None or isinstance(value, str | int | float | bool): + return value + if isinstance(value, list | tuple): + return [_jsonable_schema_value(item) for item in value] + if isinstance(value, dict): + return {str(key): _jsonable_schema_value(item) for key, item in value.items()} + value_type = type(value) + return f"<{value_type.__module__}.{value_type.__qualname__}>" + + +def _field_signature(field: object) -> object: + """Build a stable signature for a Flask-RESTX field object.""" + + from flask_restx import fields + from flask_restx.model import instance + + field_instance = instance(field) + signature: dict[str, object] = { + "class": f"{field_instance.__class__.__module__}.{field_instance.__class__.__qualname__}" + } + + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + signature["nested"] = 
_inline_model_signature(nested) + else: + signature["nested"] = getattr( + nested, + "name", + f"<{type(nested).__module__}.{type(nested).__qualname__}>", + ) + elif hasattr(field_instance, "container"): + signature["container"] = _field_signature(field_instance.container) + else: + schema = getattr(field_instance, "__schema__", None) + if isinstance(schema, dict): + signature["schema"] = _jsonable_schema_value(schema) + + for attr_name in ( + "attribute", + "default", + "description", + "example", + "max", + "min", + "nullable", + "readonly", + "required", + "title", + ): + if hasattr(field_instance, attr_name): + signature[attr_name] = _jsonable_schema_value(getattr(field_instance, attr_name)) + + return signature + + +def _inline_model_signature(nested_fields: dict[object, object]) -> object: + """Build a stable signature for an anonymous inline model.""" + + return [ + (str(field_name), _field_signature(field)) + for field_name, field in sorted(nested_fields.items(), key=lambda item: str(item[0])) + ] + + +def _inline_model_name(nested_fields: dict[object, object]) -> str: + """Return a stable Swagger model name for an anonymous inline field map.""" + + signature = json.dumps(_inline_model_signature(nested_fields), sort_keys=True, separators=(",", ":")) + digest = hashlib.sha1(signature.encode("utf-8")).hexdigest()[:12] + return f"_AnonymousInlineModel_{digest}" + + +def apply_runtime_defaults() -> None: """Force the small config surface required for Swagger generation.""" os.environ.setdefault("SECRET_KEY", "spec-export") @@ -74,25 +168,26 @@ def _patch_swagger_for_inline_nested_dicts() -> None: anonymous_models = getattr(self, "_anonymous_inline_models", None) if anonymous_models is None: anonymous_models = {} - self._anonymous_inline_models = anonymous_models + self.__dict__["_anonymous_inline_models"] = anonymous_models anonymous_name = anonymous_models.get(id(nested_fields)) if anonymous_name is None: - anonymous_name = 
f"_AnonymousInlineModel{len(anonymous_models) + 1}" + anonymous_name = _inline_model_name(nested_fields) anonymous_models[id(nested_fields)] = anonymous_name - self.api.model(anonymous_name, nested_fields) + if anonymous_name not in self.api.models: + self.api.model(anonymous_name, nested_fields) return self.api.models[anonymous_name] def register_model_with_inline_dict_support(self: Swagger, model: object) -> dict[str, str]: - if isinstance(model, dict): + if _is_inline_field_map(model): model = get_or_create_inline_model(self, model) return _ORIGINAL_REGISTER_MODEL(self, model) def register_field_with_inline_dict_support(self: Swagger, field: object) -> None: nested = getattr(field, "nested", None) - if isinstance(nested, dict): + if _is_inline_field_map(nested): field.model = get_or_create_inline_model(self, nested) # type: ignore _ORIGINAL_REGISTER_FIELD(self, field) @@ -105,22 +200,169 @@ def _patch_swagger_for_inline_nested_dicts() -> None: def create_spec_app() -> Flask: """Build a minimal Flask app that only mounts the Swagger-producing blueprints.""" - _apply_runtime_defaults() + apply_runtime_defaults() _patch_swagger_for_inline_nested_dicts() app = Flask(__name__) from controllers.console import bp as console_bp + from controllers.console import console_ns from controllers.service_api import bp as service_api_bp + from controllers.service_api import service_api_ns from controllers.web import bp as web_bp + from controllers.web import web_ns app.register_blueprint(console_bp) app.register_blueprint(web_bp) app.register_blueprint(service_api_bp) + for namespace in (console_ns, web_ns, service_api_ns): + for api in namespace.apis: + _materialize_inline_model_definitions(api) + return app +def _registered_models(namespace: str) -> dict[str, object]: + """Return the Flask-RESTX models registered for a Swagger namespace.""" + + if namespace == "console": + from controllers.console import console_ns + + models = dict(console_ns.models) + for api in 
console_ns.apis: + models.update(api.models) + return models + if namespace == "web": + from controllers.web import web_ns + + models = dict(web_ns.models) + for api in web_ns.apis: + models.update(api.models) + return models + if namespace == "service": + from controllers.service_api import service_api_ns + + models = dict(service_api_ns.models) + for api in service_api_ns.apis: + models.update(api.models) + return models + + raise ValueError(f"unknown Swagger namespace: {namespace}") + + +def _materialize_inline_model_definitions(api: RestxApi) -> None: + """Convert inline `fields.Nested({...})` maps into named API models.""" + + from flask_restx import fields + from flask_restx.model import Model, OrderedModel, instance + + inline_models: dict[int, dict[object, object]] = {} + inline_model_names: dict[int, str] = {} + + def collect_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested) and id(nested) not in inline_models: + inline_models[id(nested)] = nested + for nested_field in nested.values(): + collect_field(nested_field) + + container = getattr(field_instance, "container", None) + if container is not None: + collect_field(container) + + for model in list(api.models.values()): + if isinstance(model, (Model, OrderedModel)): + for field in model.values(): + collect_field(field) + + for nested_fields in sorted(inline_models.values(), key=_inline_model_name): + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + + def model_name_for(nested_fields: dict[object, object]) -> str: + anonymous_name = inline_model_names.get(id(nested_fields)) + if anonymous_name is None: + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if 
anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + return anonymous_name + + def materialize_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + field_instance.model = api.models[model_name_for(nested)] # type: ignore[attr-defined] + + container = getattr(field_instance, "container", None) + if container is not None: + materialize_field(container) + + index = 0 + while index < len(api.models): + model = list(api.models.values())[index] + index += 1 + if isinstance(model, (Model, OrderedModel)): + for field in model.values(): + materialize_field(field) + + +def drop_null_values(value: object) -> object: + """Remove JSON null values that make the Markdown converter crash.""" + + if isinstance(value, dict): + return {key: drop_null_values(item) for key, item in value.items() if item is not None} + if isinstance(value, list): + return [drop_null_values(item) for item in value] + return value + + +def sort_openapi_arrays(value: object, *, parent_key: str | None = None) -> object: + """Sort order-insensitive Swagger arrays so generated Markdown is stable.""" + + if isinstance(value, dict): + return {key: sort_openapi_arrays(item, parent_key=key) for key, item in value.items()} + if not isinstance(value, list): + return value + + sorted_items = [sort_openapi_arrays(item, parent_key=parent_key) for item in value] + if parent_key == "parameters": + return sorted( + sorted_items, + key=lambda item: ( + item.get("in", "") if isinstance(item, dict) else "", + item.get("name", "") if isinstance(item, dict) else "", + json.dumps(item, sort_keys=True, default=str), + ), + ) + if parent_key in {"enum", "required", "schemes", "tags"}: + string_items = [item for item in sorted_items if isinstance(item, str)] + if len(string_items) == len(sorted_items): + return sorted(string_items) + return sorted_items + + 
+def _merge_registered_definitions(payload: dict[str, object], namespace: str) -> dict[str, object]: + """Include registered but route-indirect models in the exported Swagger definitions.""" + + definitions = payload.setdefault("definitions", {}) + if not isinstance(definitions, dict): + raise RuntimeError("unexpected Swagger definitions payload") + + for name, model in _registered_models(namespace).items(): + schema = getattr(model, "__schema__", None) + if isinstance(schema, dict): + definitions.setdefault(name, schema) + + return payload + + def generate_specs(output_dir: Path) -> list[Path]: """Write all Swagger specs to `output_dir` and return the written paths.""" @@ -138,6 +380,9 @@ def generate_specs(output_dir: Path) -> list[Path]: payload = response.get_json() if not isinstance(payload, dict): raise RuntimeError(f"unexpected response payload for {target.route}") + payload = _merge_registered_definitions(payload, target.namespace) + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) output_path = output_dir / target.filename output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") diff --git a/api/extensions/ext_session_factory.py b/api/extensions/ext_session_factory.py index 0eb43d66f4..e19ccd11e5 100644 --- a/api/extensions/ext_session_factory.py +++ b/api/extensions/ext_session_factory.py @@ -1,7 +1,9 @@ +from flask import Flask + from core.db.session_factory import configure_session_factory from extensions.ext_database import db -def init_app(app): +def init_app(app: Flask): with app.app_context(): configure_session_factory(db.engine) diff --git a/api/factories/file_factory/builders.py b/api/factories/file_factory/builders.py index 1d2ad4d445..9635acba75 100644 --- a/api/factories/file_factory/builders.py +++ b/api/factories/file_factory/builders.py @@ -5,7 +5,7 @@ from __future__ import annotations import mimetypes import uuid from collections.abc import Mapping, Sequence -from typing 
import Any +from typing import Any, Literal, NotRequired, TypedDict, assert_never, cast from sqlalchemy import select @@ -19,10 +19,58 @@ from .common import resolve_mapping_file_id from .remote import get_remote_file_info from .validation import is_file_valid_with_config +type FileTypeValue = FileType | Literal["image", "document", "audio", "video", "custom"] + +type _LocalFileTransferMethod = Literal["local_file", FileTransferMethod.LOCAL_FILE] +type _RemoteUrlTransferMethod = Literal["remote_url", FileTransferMethod.REMOTE_URL] +type _ToolFileTransferMethod = Literal["tool_file", FileTransferMethod.TOOL_FILE] +type _DatasourceFileTransferMethod = Literal["datasource_file", FileTransferMethod.DATASOURCE_FILE] + + +class LocalFileMapping(TypedDict): + transfer_method: _LocalFileTransferMethod + id: NotRequired[str | None] # Read as the graph-layer File.file_id. + type: NotRequired[FileTypeValue | None] # Read for type override and upload config validation. + upload_file_id: NotRequired[str | None] # File id lookup priority 1. + reference: NotRequired[str | None] # File id lookup priority 2; may be an opaque file reference. + related_id: NotRequired[str | None] # File id lookup priority 3; legacy persisted field. + + +class RemoteUrlMapping(TypedDict): + transfer_method: _RemoteUrlTransferMethod + id: NotRequired[str | None] # Read as the graph-layer File.file_id. + type: NotRequired[FileTypeValue | None] # Read for type override and upload config validation. + upload_file_id: NotRequired[str | None] # Persisted UploadFile lookup priority 1. + reference: NotRequired[str | None] # Persisted UploadFile lookup priority 2; may be an opaque file reference. + related_id: NotRequired[str | None] # Persisted UploadFile lookup priority 3; legacy persisted field. + url: NotRequired[str | None] # External URL lookup priority 1 when no UploadFile id is resolved. + remote_url: NotRequired[str | None] # External URL lookup priority 2 when no UploadFile id is resolved. 
+ + +class ToolFileMapping(TypedDict): + transfer_method: _ToolFileTransferMethod + id: NotRequired[str | None] # Read as the graph-layer File.file_id. + type: NotRequired[FileTypeValue | None] # Read for type override and upload config validation. + tool_file_id: NotRequired[str | None] # ToolFile lookup priority 1. + reference: NotRequired[str | None] # ToolFile lookup priority 2; may be an opaque file reference. + related_id: NotRequired[str | None] # ToolFile lookup priority 3; legacy persisted field. + + +class DatasourceFileMapping(TypedDict): + transfer_method: _DatasourceFileTransferMethod + type: NotRequired[FileTypeValue | None] # Read for type override and upload config validation. + datasource_file_id: NotRequired[str | None] # UploadFile lookup priority 1 for datasource-backed files. + reference: NotRequired[str | None] # UploadFile lookup priority 2; may be an opaque file reference. + related_id: NotRequired[str | None] # UploadFile lookup priority 3; legacy persisted field. 
+ + +type FileMapping = LocalFileMapping | RemoteUrlMapping | ToolFileMapping | DatasourceFileMapping +type FileMappingInput = FileMapping | Mapping[str, Any] + def build_from_mapping( *, - mapping: Mapping[str, Any], + mapping: FileMappingInput, tenant_id: str, config: FileUploadConfig | None = None, strict_type_validation: bool = False, @@ -32,18 +80,45 @@ def build_from_mapping( if not transfer_method_value: raise ValueError("transfer_method is required in file mapping") - transfer_method = FileTransferMethod.value_of(transfer_method_value) - build_func = _get_build_function(transfer_method) - file = build_func( - mapping=mapping, - tenant_id=tenant_id, - transfer_method=transfer_method, - strict_type_validation=strict_type_validation, - access_controller=access_controller, - ) + transfer_method = FileTransferMethod.value_of(str(transfer_method_value)) + match transfer_method: + case FileTransferMethod.LOCAL_FILE: + file = _build_from_local_file( + mapping=cast(LocalFileMapping, mapping), + tenant_id=tenant_id, + transfer_method=transfer_method, + strict_type_validation=strict_type_validation, + access_controller=access_controller, + ) + case FileTransferMethod.REMOTE_URL: + file = _build_from_remote_url( + mapping=cast(RemoteUrlMapping, mapping), + tenant_id=tenant_id, + transfer_method=transfer_method, + strict_type_validation=strict_type_validation, + access_controller=access_controller, + ) + case FileTransferMethod.TOOL_FILE: + file = _build_from_tool_file( + mapping=cast(ToolFileMapping, mapping), + tenant_id=tenant_id, + transfer_method=transfer_method, + strict_type_validation=strict_type_validation, + access_controller=access_controller, + ) + case FileTransferMethod.DATASOURCE_FILE: + file = _build_from_datasource_file( + mapping=cast(DatasourceFileMapping, mapping), + tenant_id=tenant_id, + transfer_method=transfer_method, + strict_type_validation=strict_type_validation, + access_controller=access_controller, + ) + case _: + 
assert_never(transfer_method) if config and not is_file_valid_with_config( - input_file_type=mapping.get("type", FileType.CUSTOM), + input_file_type=mapping.get("type") or FileType.CUSTOM, file_extension=file.extension or "", file_transfer_method=file.transfer_method, config=config, @@ -87,19 +162,6 @@ def build_from_mappings( return files -def _get_build_function(transfer_method: FileTransferMethod): - build_functions = { - FileTransferMethod.LOCAL_FILE: _build_from_local_file, - FileTransferMethod.REMOTE_URL: _build_from_remote_url, - FileTransferMethod.TOOL_FILE: _build_from_tool_file, - FileTransferMethod.DATASOURCE_FILE: _build_from_datasource_file, - } - build_func = build_functions.get(transfer_method) - if build_func is None: - raise ValueError(f"Invalid file transfer method: {transfer_method}") - return build_func - - def _resolve_file_type( *, detected_file_type: FileType, @@ -116,7 +178,7 @@ def _resolve_file_type( def _build_from_local_file( *, - mapping: Mapping[str, Any], + mapping: LocalFileMapping, tenant_id: str, transfer_method: FileTransferMethod, strict_type_validation: bool = False, @@ -163,7 +225,7 @@ def _build_from_local_file( def _build_from_remote_url( *, - mapping: Mapping[str, Any], + mapping: RemoteUrlMapping, tenant_id: str, transfer_method: FileTransferMethod, strict_type_validation: bool = False, @@ -235,7 +297,7 @@ def _build_from_remote_url( def _build_from_tool_file( *, - mapping: Mapping[str, Any], + mapping: ToolFileMapping, tenant_id: str, transfer_method: FileTransferMethod, strict_type_validation: bool = False, @@ -278,7 +340,7 @@ def _build_from_tool_file( def _build_from_datasource_file( *, - mapping: Mapping[str, Any], + mapping: DatasourceFileMapping, tenant_id: str, transfer_method: FileTransferMethod, strict_type_validation: bool = False, @@ -298,7 +360,7 @@ def _build_from_datasource_file( raise ValueError(f"DatasourceFile {mapping.get('datasource_file_id')} not found") extension = "." 
+ datasource_file.key.split(".")[-1] if "." in datasource_file.key else ".bin" - detected_file_type = standardize_file_type(extension="." + extension, mime_type=datasource_file.mime_type) + detected_file_type = standardize_file_type(extension=extension, mime_type=datasource_file.mime_type) file_type = _resolve_file_type( detected_file_type=detected_file_type, specified_type=mapping.get("type"), diff --git a/api/libs/typing.py b/api/libs/typing.py deleted file mode 100644 index f84e9911e0..0000000000 --- a/api/libs/typing.py +++ /dev/null @@ -1,9 +0,0 @@ -from typing import TypeGuard - - -def is_str_dict(v: object) -> TypeGuard[dict[str, object]]: - return isinstance(v, dict) - - -def is_str(v: object) -> TypeGuard[str]: - return isinstance(v, str) diff --git a/api/migrations/versions/2026_04_29_1200-a4f2d8c9b731_add_recommended_app_categories.py b/api/migrations/versions/2026_04_29_1200-a4f2d8c9b731_add_recommended_app_categories.py new file mode 100644 index 0000000000..eee58b6310 --- /dev/null +++ b/api/migrations/versions/2026_04_29_1200-a4f2d8c9b731_add_recommended_app_categories.py @@ -0,0 +1,26 @@ +"""add recommended app categories + +Revision ID: a4f2d8c9b731 +Revises: 227822d22895 +Create Date: 2026-04-29 12:00:00.000000 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "a4f2d8c9b731" +down_revision = "227822d22895" +branch_labels = None +depends_on = None + + +def upgrade(): + with op.batch_alter_table("recommended_apps", schema=None) as batch_op: + batch_op.add_column(sa.Column("categories", sa.JSON(), nullable=True)) + + +def downgrade(): + with op.batch_alter_table("recommended_apps", schema=None) as batch_op: + batch_op.drop_column("categories") diff --git a/api/migrations/versions/2026_05_06_1200-8d4c2a1b9f03_add_human_input_upload_tables.py b/api/migrations/versions/2026_05_06_1200-8d4c2a1b9f03_add_human_input_upload_tables.py new file mode 100644 index 0000000000..a720d70c2f --- /dev/null +++ b/api/migrations/versions/2026_05_06_1200-8d4c2a1b9f03_add_human_input_upload_tables.py @@ -0,0 +1,64 @@ +"""Add human input upload token and file association tables + +Revision ID: 8d4c2a1b9f03 +Revises: 227822d22895 +Create Date: 2026-05-06 12:00:00.000000 + +""" + +import sqlalchemy as sa +from alembic import op + +import models + +# revision identifiers, used by Alembic. 
+revision = "8d4c2a1b9f03" +down_revision = "227822d22895" +branch_labels = None +depends_on = None + + +def upgrade(): + op.create_table( + "human_input_form_upload_tokens", + sa.Column("id", models.types.StringUUID(), nullable=False), + sa.Column("created_at", sa.DateTime(), server_default=sa.text("CURRENT_TIMESTAMP"), nullable=False), + sa.Column("updated_at", sa.DateTime(), server_default=sa.text("CURRENT_TIMESTAMP"), nullable=False), + sa.Column("tenant_id", models.types.StringUUID(), nullable=False), + sa.Column("app_id", models.types.StringUUID(), nullable=False), + sa.Column("form_id", models.types.StringUUID(), nullable=False), + sa.Column("recipient_id", models.types.StringUUID(), nullable=False), + sa.Column("token", sa.String(length=255), nullable=False), + sa.PrimaryKeyConstraint("id", name="human_input_form_upload_tokens_pkey"), + sa.UniqueConstraint("token", name="human_input_form_upload_tokens_token_key"), + ) + with op.batch_alter_table("human_input_form_upload_tokens", schema=None) as batch_op: + batch_op.create_index("human_input_form_upload_tokens_form_id_idx", ["form_id"], unique=False) + + op.create_table( + "human_input_form_upload_files", + sa.Column("id", models.types.StringUUID(), nullable=False), + sa.Column("created_at", sa.DateTime(), server_default=sa.text("CURRENT_TIMESTAMP"), nullable=False), + sa.Column("updated_at", sa.DateTime(), server_default=sa.text("CURRENT_TIMESTAMP"), nullable=False), + sa.Column("tenant_id", models.types.StringUUID(), nullable=False), + sa.Column("app_id", models.types.StringUUID(), nullable=False), + sa.Column("form_id", models.types.StringUUID(), nullable=False), + sa.Column("upload_file_id", models.types.StringUUID(), nullable=False), + sa.Column("upload_token_id", models.types.StringUUID(), nullable=False), + sa.PrimaryKeyConstraint("id", name="human_input_form_upload_files_pkey"), + sa.UniqueConstraint("upload_file_id", name="human_input_form_upload_files_upload_file_id_key"), + ) + with 
op.batch_alter_table("human_input_form_upload_files", schema=None) as batch_op: + batch_op.create_index("human_input_form_upload_files_form_id_idx", ["form_id"], unique=False) + batch_op.create_index("human_input_form_upload_files_upload_token_id_idx", ["upload_token_id"], unique=False) + + +def downgrade(): + with op.batch_alter_table("human_input_form_upload_files", schema=None) as batch_op: + batch_op.drop_index("human_input_form_upload_files_upload_token_id_idx") + batch_op.drop_index("human_input_form_upload_files_form_id_idx") + op.drop_table("human_input_form_upload_files") + + with op.batch_alter_table("human_input_form_upload_tokens", schema=None) as batch_op: + batch_op.drop_index("human_input_form_upload_tokens_form_id_idx") + op.drop_table("human_input_form_upload_tokens") diff --git a/api/models/__init__.py b/api/models/__init__.py index 7fbcc78d68..d3a238a8e3 100644 --- a/api/models/__init__.py +++ b/api/models/__init__.py @@ -46,7 +46,7 @@ from .evaluation import ( EvaluationTargetType, ) from .execution_extra_content import ExecutionExtraContent, HumanInputContent -from .human_input import HumanInputForm +from .human_input import HumanInputForm, HumanInputFormUploadFile, HumanInputFormUploadToken from .model import ( AccountTrialAppRecord, ApiRequest, @@ -182,6 +182,8 @@ __all__ = [ "ExternalKnowledgeBindings", "HumanInputContent", "HumanInputForm", + "HumanInputFormUploadFile", + "HumanInputFormUploadToken", "IconType", "InstalledApp", "InvitationCode", diff --git a/api/models/human_input.py b/api/models/human_input.py index 7447d3efcb..7b02e8d29d 100644 --- a/api/models/human_input.py +++ b/api/models/human_input.py @@ -251,3 +251,55 @@ class HumanInputFormRecipient(DefaultFieldsMixin, Base): access_token=_generate_token(), ) return recipient_model + + +class HumanInputFormUploadToken(DefaultFieldsMixin, Base): + """Upload authorization token bound to one human input form recipient. 
+ + HITL upload tokens are intentionally separate from app/service bearer tokens. + The token is stored as an opaque random value so upload endpoints can perform + a direct lookup without entering the normal Web App authentication chain. + Upload ownership is resolved from the form's workflow run initiator instead + of being persisted on the token row itself. + """ + + __tablename__ = "human_input_form_upload_tokens" + __table_args__ = ( + sa.UniqueConstraint("token", name="human_input_form_upload_tokens_token_key"), + sa.Index("human_input_form_upload_tokens_form_id_idx", "form_id"), + ) + + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + app_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + form_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + recipient_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + token: Mapped[str] = mapped_column(sa.String(255), nullable=False) + + form: Mapped[HumanInputForm] = relationship( + "HumanInputForm", + uselist=False, + foreign_keys=[form_id], + primaryjoin="foreign(HumanInputFormUploadToken.form_id) == HumanInputForm.id", + lazy="raise", + ) + + +class HumanInputFormUploadFile(DefaultFieldsMixin, Base): + """Association between a human input form and a file uploaded through its token. + + Ownership remains on ``UploadFile`` itself; this table only records the + durable form/token/file linkage needed by Human Input flows. 
+ """ + + __tablename__ = "human_input_form_upload_files" + __table_args__ = ( + sa.UniqueConstraint("upload_file_id", name="human_input_form_upload_files_upload_file_id_key"), + sa.Index("human_input_form_upload_files_form_id_idx", "form_id"), + sa.Index("human_input_form_upload_files_upload_token_id_idx", "upload_token_id"), + ) + + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + app_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + form_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + upload_file_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + upload_token_id: Mapped[str] = mapped_column(StringUUID, nullable=False) diff --git a/api/models/model.py b/api/models/model.py index 25c330b062..f7f90465cf 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -878,6 +878,7 @@ class RecommendedApp(TypeBase): copyright: Mapped[str] = mapped_column(String(255), nullable=False) privacy_policy: Mapped[str] = mapped_column(String(255), nullable=False) category: Mapped[str] = mapped_column(String(255), nullable=False) + categories: Mapped[list[str] | None] = mapped_column(sa.JSON, nullable=True, default=None) custom_disclaimer: Mapped[str] = mapped_column(LongText, default="") position: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) is_listed: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=True) diff --git a/api/models/provider.py b/api/models/provider.py index 2bb67d605b..8dc3ce4ff6 100644 --- a/api/models/provider.py +++ b/api/models/provider.py @@ -9,11 +9,11 @@ import sqlalchemy as sa from sqlalchemy import DateTime, String, func, select, text from sqlalchemy.orm import Mapped, mapped_column +from core.db.session_factory import session_factory from graphon.model_runtime.entities.model_entities import ModelType from libs.uuid_utils import uuidv7 from .base import TypeBase -from .engine import db from .enums import CredentialSourceType, PaymentStatus, ProviderQuotaType 
from .types import EnumText, LongText, StringUUID @@ -82,7 +82,8 @@ class Provider(TypeBase): @cached_property def credential(self): if self.credential_id: - return db.session.scalar(select(ProviderCredential).where(ProviderCredential.id == self.credential_id)) + with session_factory.create_session() as session: + return session.scalar(select(ProviderCredential).where(ProviderCredential.id == self.credential_id)) @property def credential_name(self): @@ -145,9 +146,10 @@ class ProviderModel(TypeBase): @cached_property def credential(self): if self.credential_id: - return db.session.scalar( - select(ProviderModelCredential).where(ProviderModelCredential.id == self.credential_id) - ) + with session_factory.create_session() as session: + return session.scalar( + select(ProviderModelCredential).where(ProviderModelCredential.id == self.credential_id) + ) @property def credential_name(self): diff --git a/api/openapi/markdown/console-swagger.md b/api/openapi/markdown/console-swagger.md new file mode 100644 index 0000000000..a69cecd83c --- /dev/null +++ b/api/openapi/markdown/console-swagger.md @@ -0,0 +1,14766 @@ +# Console API +Console management APIs for app configuration, monitoring, and administration + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## console +Console management API operations + +### /account/avatar + +#### GET +##### Description + +Get account avatar url + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountAvatarQuery](#accountavatarquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body 
| | Yes | [AccountAvatarPayload](#accountavatarpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailSendPayload](#changeemailsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/check-email-unique + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CheckEmailUniquePayload](#checkemailuniquepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/reset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailResetPayload](#changeemailresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailValidityPayload](#changeemailvaliditypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletePayload](#accountdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/account/delete/feedback + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletionFeedbackPayload](#accountdeletionfeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete/verify + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationStatusResponse](#educationstatusresponse) | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationActivatePayload](#educationactivatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education/autocomplete + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationAutocompleteQuery](#educationautocompletequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationAutocompleteResponse](#educationautocompleteresponse) | + +### /account/education/verify + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationVerifyResponse](#educationverifyresponse) | + +### /account/init + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInitPayload](#accountinitpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/account/integrates + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountIntegrateListResponse](#accountintegratelistresponse) | + +### /account/interface-language + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInterfaceLanguagePayload](#accountinterfacelanguagepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/interface-theme + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInterfaceThemePayload](#accountinterfacethemepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountNamePayload](#accountnamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountPasswordPayload](#accountpasswordpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/profile + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/timezone + +#### POST +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountTimezonePayload](#accounttimezonepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /activate + +#### POST +##### Description + +Activate account with invitation token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivatePayload](#activatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Account activated successfully | [ActivationResponse](#activationresponse) | +| 400 | Already activated or invalid token | | + +### /activate/check + +#### GET +##### Description + +Check if activation token is valid + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivateCheckQuery](#activatecheckquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ActivationCheckResponse](#activationcheckresponse) | + +### /admin/batch_add_notification_accounts + +#### POST +##### Description + +Register target accounts for a notification by email address. JSON body: {"notification_id": "...", "user_email": ["a@example.com", ...]}. File upload: multipart/form-data with a 'file' field (CSV or TXT, one email per line) plus a 'notification_id' field. Emails that do not match any account are silently skipped. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Accounts added successfully | + +### /admin/delete-explore-banner/{banner_id} + +#### DELETE +##### Description + +Delete an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| banner_id | path | Banner ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Banner deleted successfully | + +### /admin/insert-explore-apps + +#### POST +##### Description + +Insert or update an app in the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreAppPayload](#insertexploreapppayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | App updated successfully | +| 201 | App inserted successfully | +| 404 | App not found | + +### /admin/insert-explore-apps/{app_id} + +#### DELETE +##### Description + +Remove an app from the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID to remove | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App removed successfully | + +### /admin/insert-explore-banner + +#### POST +##### Description + +Insert an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreBannerPayload](#insertexplorebannerpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Banner inserted successfully | + +### /admin/upsert_notification + +#### POST +##### Description + +Create or update an in-product notification. 
Supply notification_id to update an existing one; omit it to create a new one. Pass at least one language variant in contents (zh / en / jp). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpsertNotificationPayload](#upsertnotificationpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Notification upserted successfully | + +### /all-workspaces + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceListQuery](#workspacelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-based-extension + +#### GET +##### Description + +Get all API-based extensions for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionListResponse](#apibasedextensionlistresponse) | + +#### POST +##### Description + +Create a new API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Extension created successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-based-extension/{id} + +#### DELETE +##### Description + +Delete API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Extension deleted successfully | + +#### GET +##### Description + +Get 
API-based extension by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +#### POST +##### Description + +Update API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Extension updated successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-key-auth/data-source + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/binding + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiKeyAuthBindingPayload](#apikeyauthbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/{binding_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/prompt-templates + +#### GET +##### Description + +Get advanced prompt templates based on app mode and model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[AdvancedPromptTemplateQuery](#advancedprompttemplatequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Prompt templates retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps + +#### GET +##### Summary + +Get app list + +##### Description + +Get list of applications with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppListQuery](#applistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppPagination](#apppagination) | + +#### POST +##### Summary + +Create app + +##### Description + +Create a new application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAppPayload](#createapppayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App created successfully | [AppDetail](#appdetail) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppImportPayload](#appimportpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import completed | [Import](#import) | +| 202 | Import pending confirmation | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/imports/{app_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | 
Schema | +| ---- | ----------- | ------ | +| 200 | Dependencies checked | [CheckDependenciesResult](#checkdependenciesresult) | + +### /apps/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import confirmed | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/workflows/online-users + +#### POST +##### Description + +Get workflow online users + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowOnlineUsersPayload](#workflowonlineuserspayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id} + +#### DELETE +##### Summary + +Delete app + +##### Description + +Delete application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App deleted successfully | +| 403 | Insufficient permissions | + +#### GET +##### Summary + +Get app detail + +##### Description + +Get application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppDetailWithSite](#appdetailwithsite) | + +#### PUT +##### Summary + +Update app + +##### Description + +Update application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAppPayload](#updateapppayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App updated successfully | [AppDetailWithSite](#appdetailwithsite) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/advanced-chat/workflow-runs + +#### GET +##### Summary + +Get advanced chat app workflow run list + +##### Description + +Get advanced chat workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunListQuery](#workflowrunlistquery) | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [AdvancedChatWorkflowRunPagination](#advancedchatworkflowrunpagination) | + +### /apps/{app_id}/advanced-chat/workflow-runs/count + +#### GET +##### Summary + +Get advanced chat workflow runs count statistics + +##### Description + +Get advanced chat workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunCountQuery](#workflowruncountquery) | +| app_id | path | Application ID | Yes | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCount](#workflowruncount) | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST +##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application 
ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow for advanced chat application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AdvancedChatWorkflowRunPayload](#advancedchatworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow run started successfully | +| 400 | Invalid request parameters | +| 403 | Permission denied | + +### /apps/{app_id}/agent/logs + +#### GET +##### Summary + +Get agent logs + +##### Description + +Get agent execution logs for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AgentLogQuery](#agentlogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | 
Description | Schema | +| ---- | ----------- | ------ | +| 200 | Agent logs retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps/{app_id}/annotation-reply/{action} + +#### POST +##### Description + +Enable or disable annotation reply for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyPayload](#annotationreplypayload) | +| action | path | Action to perform (enable/disable) | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-reply/{action}/status/{job_id} + +#### GET +##### Description + +Get status of annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-setting + +#### GET +##### Description + +Get annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotation settings retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-settings/{annotation_setting_id} + +#### POST +##### Description + +Update annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationSettingUpdatePayload](#annotationsettingupdatepayload) | +| annotation_setting_id | path | Annotation setting ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Settings updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get annotations for an app with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationListQuery](#annotationlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotations retrieved successfully | +| 403 | Insufficient permissions | + +#### POST +##### Description + +Create a new annotation for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAnnotationPayload](#createannotationpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/batch-import + +#### POST +##### Description + +Batch import annotations from CSV file with rate limiting and security checks + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Batch import started successfully | +| 400 | No file uploaded or too many files | +| 403 | Insufficient permissions | +| 413 | File too large | +| 429 | Too many requests or concurrent imports | + +### /apps/{app_id}/annotations/batch-import-status/{job_id} + +#### GET +##### Description + +Get status of batch import job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations/count + +#### GET +##### Description + +Get count of message annotations for the app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation count retrieved successfully | [AnnotationCountResponse](#annotationcountresponse) | + +### /apps/{app_id}/annotations/export + +#### GET +##### Description + +Export all annotations for an app with CSV injection protection + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations exported successfully | [AnnotationExportList](#annotationexportlist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id} + +#### DELETE 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | | Yes | string | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Description + +Update or delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAnnotationPayload](#updateannotationpayload) | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 204 | Annotation deleted successfully | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id}/hit-histories + +#### GET +##### Description + +Get hit histories for an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | +| limit | query | Page size | No | integer | +| page | query | Page number | No | integer | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit histories retrieved successfully | [AnnotationHitHistoryList](#annotationhithistorylist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/api-enable + +#### POST +##### Description + +Enable or disable app API + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppApiStatusPayload](#appapistatuspayload) | +| app_id | path | Application ID | Yes | string | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/audio-to-text + +#### POST +##### Description + +Transcript audio to text for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Audio transcription successful | [AudioTranscriptResponse](#audiotranscriptresponse) | +| 400 | Bad request - No audio uploaded or unsupported type | | +| 413 | Audio file too large | | + +### /apps/{app_id}/chat-conversations + +#### GET +##### Description + +Get chat conversations with pagination, filtering and summary + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatConversationQuery](#chatconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationWithSummaryPagination](#conversationwithsummarypagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/chat-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a chat conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get chat conversation details 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationDetail](#conversationdetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages + +#### GET +##### Description + +Get chat messages for a conversation with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagesQuery](#chatmessagesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [MessageInfiniteScrollPaginationResponse](#messageinfinitescrollpaginationresponse) | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested questions for a message + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Suggested questions retrieved successfully | [SuggestedQuestionsResponse](#suggestedquestionsresponse) | +| 404 | Message or conversation not found | | + +### /apps/{app_id}/chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application 
ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/completion-conversations + +#### GET +##### Description + +Get completion conversations with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionConversationQuery](#completionconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationPagination](#conversationpagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/completion-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a completion conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get completion conversation details with messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationMessageDetail](#conversationmessagedetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/completion-messages + +#### POST +##### 
Description + +Generate completion message for debugging + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion generated successfully | +| 400 | Invalid request parameters | +| 404 | App not found | + +### /apps/{app_id}/completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/conversation-variables + +#### GET +##### Description + +Get conversation variables for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [PaginatedConversationVariableResponse](#paginatedconversationvariableresponse) | + +### /apps/{app_id}/convert-to-workflow + +#### POST +##### Summary + +Convert basic mode of chatbot app to workflow mode + +##### Description + +Convert application to workflow mode +Convert expert mode of chatbot app to workflow mode +Convert Completion App to Workflow App + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | 
----------- | -------- | ------ | +| payload | body | | Yes | [ConvertToWorkflowPayload](#converttoworkflowpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application converted to workflow successfully | +| 400 | Application cannot be converted | +| 403 | Permission denied | + +### /apps/{app_id}/copy + +#### POST +##### Summary + +Copy app + +##### Description + +Create a copy of an existing application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CopyAppPayload](#copyapppayload) | +| app_id | path | Application ID to copy | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App copied successfully | [AppDetailWithSite](#appdetailwithsite) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/export + +#### GET +##### Summary + +Export app + +##### Description + +Export application configuration as DSL + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppExportQuery](#appexportquery) | +| app_id | path | Application ID to export | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App exported successfully | [AppExportResponse](#appexportresponse) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/feedbacks + +#### POST +##### Description + +Create or update message feedback (like/dislike) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | 
----------- | +| 200 | Feedback updated successfully | +| 403 | Insufficient permissions | +| 404 | Message not found | + +### /apps/{app_id}/feedbacks/export + +#### GET +##### Description + +Export user feedback data for Google Sheets + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackExportQuery](#feedbackexportquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback data exported successfully | +| 400 | Invalid parameters | +| 500 | Internal server error | + +### /apps/{app_id}/icon + +#### POST +##### Description + +Update application icon + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppIconPayload](#appiconpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Icon updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/messages/{message_id} + +#### GET +##### Description + +Get message details by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Message retrieved successfully | [MessageDetailResponse](#messagedetailresponse) | +| 404 | Message not found | | + +### /apps/{app_id}/model-config + +#### POST +##### Summary + +Modify app model config + +##### Description + +Update application model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
payload | body | | Yes | [ModelConfigRequest](#modelconfigrequest) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Model configuration updated successfully | +| 400 | Invalid configuration | +| 404 | App not found | + +### /apps/{app_id}/name + +#### POST +##### Description + +Update application name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppNamePayload](#appnamepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App name updated successfully | [AppDetail](#appdetail) | + +### /apps/{app_id}/publish-to-creators-platform + +#### POST +##### Summary + +Publish app to Creators Platform + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/server + +#### GET +##### Description + +Get MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration retrieved successfully | [AppMCPServerResponse](#appmcpserverresponse) | + +#### POST +##### Description + +Create MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerCreatePayload](#mcpservercreatepayload) | +| app_id | path | Application ID |
Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | MCP server configuration created successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | + +#### PUT +##### Description + +Update MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerUpdatePayload](#mcpserverupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration updated successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /apps/{app_id}/site + +#### POST +##### Description + +Update application site configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteUpdatePayload](#appsiteupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site configuration updated successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions | | +| 404 | App not found | | + +### /apps/{app_id}/site-enable + +#### POST +##### Description + +Enable or disable app site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteStatusPayload](#appsitestatuspayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient 
permissions | | + +### /apps/{app_id}/site/access-token-reset + +#### POST +##### Description + +Reset access token for application site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Access token reset successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions (admin/owner required) | | +| 404 | App or site not found | | + +### /apps/{app_id}/statistics/average-response-time + +#### GET +##### Description + +Get average response time statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average response time statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/average-session-interactions + +#### GET +##### Description + +Get average session interaction statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average session interaction statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-conversations + +#### GET +##### Description + +Get daily conversation statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily conversation statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-end-users + +#### GET +##### Description + +Get daily terminal/end-user statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily terminal statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-messages + +#### GET +##### Description + +Get daily message statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily message statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/token-costs + +#### GET +##### Description + +Get daily token cost statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily token cost statistics 
retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/tokens-per-second + +#### GET +##### Description + +Get tokens per second statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tokens per second statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/user-satisfaction-rate + +#### GET +##### Description + +Get user satisfaction rate statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | User satisfaction rate statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/text-to-audio + +#### POST +##### Description + +Convert text to speech for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToSpeechPayload](#texttospeechpayload) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text to speech conversion successful | +| 400 | Bad request - Invalid parameters | + +### /apps/{app_id}/text-to-audio/voices + +#### GET +##### Description + +Get available TTS voices for a specific language + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | 
| Yes | [TextToSpeechVoiceQuery](#texttospeechvoicequery) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | TTS voices retrieved successfully | [ object ] | +| 400 | Invalid language parameter | | + +### /apps/{app_id}/trace + +#### GET +##### Summary + +Get app trace + +##### Description + +Get app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration retrieved successfully | + +#### POST +##### Description + +Update app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppTracePayload](#apptracepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/trace-config + +#### DELETE +##### Summary + +Delete an existing trace app configuration + +##### Description + +Delete an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tracing configuration deleted successfully | +| 400 | Invalid request parameters or configuration not found | + +#### GET +##### Description + +Get tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration retrieved successfully | object | +| 400 | Invalid request parameters | | + +#### PATCH +##### Summary + +Update an existing trace app configuration + +##### Description + +Update an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration updated successfully | object | +| 400 | Invalid request parameters or configuration not found | | + +#### POST +##### Summary + +Create a new trace app configuration + +##### Description + +Create a new tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Tracing configuration created successfully | object | +| 400 | Invalid request parameters or configuration already exists | | + +### /apps/{app_id}/trigger-enable + +#### POST +##### Summary + +Update app trigger (enable/disable) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ParserEnable](#parserenable) | + +##### Responses + +| 
Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerResponse](#workflowtriggerresponse) | + +### /apps/{app_id}/triggers + +#### GET +##### Summary + +Get app triggers list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerListResponse](#workflowtriggerlistresponse) | + +### /apps/{app_id}/workflow-app-logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow application execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow app logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | + +### /apps/{app_id}/workflow-archived-logs + +#### GET +##### Summary + +Get workflow archived logs + +##### Description + +Get workflow archived execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow archived logs retrieved successfully | [WorkflowArchivedLogPaginationResponse](#workflowarchivedlogpaginationresponse) | + +### /apps/{app_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Description + +Get workflow run list + +##### Parameters + +| 
Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunListQuery](#workflowrunlistquery) | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [WorkflowRunPagination](#workflowrunpagination) | + +### /apps/{app_id}/workflow-runs/count + +#### GET +##### Summary + +Get workflow runs count statistics + +##### Description + +Get workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunCountQuery](#workflowruncountquery) | +| app_id | path | Application ID | Yes | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCount](#workflowruncount) | + +### /apps/{app_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Description + +Stop running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 403 | Permission denied | +| 404 | Task not found | + +### /apps/{app_id}/workflow-runs/{run_id} + +#### GET +##### Summary + +Get workflow run detail + +##### Description + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run detail retrieved successfully | [WorkflowRunDetail](#workflowrundetail) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow-runs/{run_id}/export + +#### GET +##### Description + +Generate a download URL for an archived workflow run. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Export URL generated | [WorkflowRunExport](#workflowrunexport) | + +### /apps/{app_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Description + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node executions retrieved successfully | [WorkflowRunNodeExecutionList](#workflowrunnodeexecutionlist) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow/comments + +#### GET +##### Summary + +Get all comments for a workflow + +##### Description + +Get all comments for a workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comments retrieved successfully | [WorkflowCommentBasic](#workflowcommentbasic) | + +#### POST +##### Summary + +Create a new workflow comment + +##### Description + +Create a new workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentCreatePayload](#workflowcommentcreatepayload) | +| app_id | path | Application ID | Yes | string | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Comment created successfully | [WorkflowCommentCreate](#workflowcommentcreate) | + +### /apps/{app_id}/workflow/comments/mention-users + +#### GET +##### Summary + +Get all users in current tenant for mentions + +##### Description + +Get all users in current tenant for mentions + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Mentionable users retrieved successfully | [WorkflowCommentMentionUsersPayload](#workflowcommentmentionuserspayload) | + +### /apps/{app_id}/workflow/comments/{comment_id} + +#### DELETE +##### Summary + +Delete a workflow comment + +##### Description + +Delete a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Comment deleted successfully | + +#### GET +##### Summary + +Get a specific workflow comment + +##### Description + +Get a specific workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment retrieved successfully | [WorkflowCommentDetail](#workflowcommentdetail) | + +#### PUT +##### Summary + +Update a workflow comment + +##### Description + +Update a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentUpdatePayload](#workflowcommentupdatepayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment updated successfully | [WorkflowCommentUpdate](#workflowcommentupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies + +#### POST +##### Summary + +Add a reply to a workflow comment + +##### Description + +Add a reply to a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Reply created successfully | [WorkflowCommentReplyCreate](#workflowcommentreplycreate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies/{reply_id} + +#### DELETE +##### Summary + +Delete a comment reply + +##### Description + +Delete a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Reply deleted successfully | + +#### PUT +##### Summary + +Update a comment reply + +##### Description + +Update a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Reply updated successfully | [WorkflowCommentReplyUpdate](#workflowcommentreplyupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/resolve + +#### POST +##### Summary + +Resolve a workflow comment + +##### Description + +Resolve a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment resolved successfully | [WorkflowCommentResolve](#workflowcommentresolve) | + +### /apps/{app_id}/workflow/statistics/average-app-interactions + +#### GET +##### Description + +Get workflow average app interaction statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Average app interaction statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-conversations + +#### GET +##### Description + +Get workflow daily runs statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | 
----------- | +| 200 | Daily runs statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-terminals + +#### GET +##### Description + +Get workflow daily terminals statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily terminals statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/token-costs + +#### GET +##### Description + +Get workflow daily token cost statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily token cost statistics retrieved successfully | + +### /apps/{app_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Description + +Get all published workflows for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowListQuery](#workflowlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflows retrieved successfully | [WorkflowPagination](#workflowpagination) | + +### /apps/{app_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configurations for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- 
| ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configurations retrieved successfully | + +### /apps/{app_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configuration by type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DefaultBlockConfigQuery](#defaultblockconfigquery) | +| app_id | path | Application ID | Yes | string | +| block_type | path | Block type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configuration retrieved successfully | +| 404 | Block type not found | + +### /apps/{app_id}/workflows/draft + +#### GET +##### Summary + +Get draft workflow + +##### Description + +Get draft workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Draft workflow not found | | + +#### POST +##### Summary + +Sync draft workflow + +##### Description + +Sync draft workflow configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SyncDraftWorkflowPayload](#syncdraftworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow synced successfully | [SyncDraftWorkflowResponse](#syncdraftworkflowresponse) | 
+| 400 | Invalid workflow configuration | | +| 403 | Permission denied | | + +### /apps/{app_id}/workflows/draft/conversation-variables + +#### GET +##### Description + +Get conversation variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | +| 404 | Draft workflow not found | | + +#### POST +##### Description + +Update conversation variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation variables updated successfully | + +### /apps/{app_id}/workflows/draft/environment-variables + +#### GET +##### Summary + +Get environment variables + +##### Description + +Get environment variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables retrieved successfully | +| 404 | Draft workflow not found | + +#### POST +##### Description + +Update environment variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EnvironmentVariableUpdatePayload](#environmentvariableupdatepayload) | +| app_id | path | 
Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables updated successfully | + +### /apps/{app_id}/workflows/draft/features + +#### POST +##### Description + +Update draft workflow features + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowFeaturesPayload](#workflowfeaturespayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow features updated successfully | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/delivery-test + +#### POST +##### Summary + +Test human input delivery + +##### Description + +Test human input delivery for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputDeliveryTestPayload](#humaninputdeliverytestpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST 
+##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Description + +Get last run result for 
draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node last run retrieved successfully | [WorkflowRunNodeExecution](#workflowrunnodeexecution) | +| 403 | Permission denied | | +| 404 | Node last run not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Description + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowNodeRunPayload](#draftworkflownoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node run started successfully | [WorkflowRunNodeExecution](#workflowrunnodeexecution) | +| 403 | Permission denied | | +| 404 | Node not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/trigger/run + +#### POST +##### Summary + +Poll for trigger events and execute single node when event arrives + +##### Description + +Poll for trigger events and execute single node when event arrives + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trigger event received and node executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE 
+##### Description + +Delete all variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Node variables deleted successfully | + +#### GET +##### Description + +Get variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Draft workflow run started successfully | +| 403 | Permission denied | + +### /apps/{app_id}/workflows/draft/system-variables + +#### GET +##### Description + +Get system variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | System variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/trigger/run + +#### 
POST +##### Summary + +Poll for trigger events and execute full workflow when event arrives + +##### Description + +Poll for trigger events and execute full workflow when event arrives + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowTriggerRunRequest](#draftworkflowtriggerrunrequest) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trigger event received and workflow executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/trigger/run-all + +#### POST +##### Summary + +Full workflow debug when the start node is a trigger + +##### Description + +Full workflow debug when the start node is a trigger + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowTriggerRunAllPayload](#draftworkflowtriggerrunallpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/variables + +#### DELETE +##### Description + +Delete all draft workflow variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Workflow variables deleted successfully | + +#### GET +##### Summary + +Get draft workflow variables + +##### Description + +Get draft workflow variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
payload | body | | Yes | [WorkflowDraftVariableListQuery](#workflowdraftvariablelistquery) | +| app_id | path | Application ID | Yes | string | +| limit | query | Number of items per page (1-100) | No | string | +| page | query | Page number (1-100000) | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow variables retrieved successfully | [WorkflowDraftVariableListWithoutValue](#workflowdraftvariablelistwithoutvalue) | + +### /apps/{app_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Description + +Delete a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Variable deleted successfully | +| 404 | Variable not found | + +#### GET +##### Description + +Get a specific workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable retrieved successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +#### PATCH +##### Description + +Update a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowDraftVariableUpdatePayload](#workflowdraftvariableupdatepayload) | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | 
[WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Description + +Reset a workflow variable to its default value + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable reset successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 204 | Variable reset (no content) | | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/publish + +#### GET +##### Summary + +Get published workflow + +##### Description + +Get published workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Published workflow not found | | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PublishWorkflowPayload](#publishworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/triggers/webhook + +#### GET +##### Summary + +Get webhook trigger for a node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WebhookTriggerResponse](#webhooktriggerresponse) | + +### /apps/{app_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Description + +Update workflow by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowUpdatePayload](#workflowupdatepayload) | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Workflow ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow updated successfully | [Workflow](#workflow) | +| 403 | Permission denied | | +| 404 | Workflow not found | | + +### /apps/{app_id}/workflows/{workflow_id}/restore + +#### POST +##### Description + +Restore a published workflow version into the draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Published workflow ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow restored successfully | +| 400 | Source workflow must be published | +| 404 | Workflow not found | + +### /apps/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for an app + +##### Description + +Get all API keys for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for an app + +##### Description + +Create a new API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /apps/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for an app + +##### Description + +Delete an API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### /apps/{server_id}/server/refresh + +#### GET +##### Description + +Refresh MCP server configuration and regenerate server code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| server_id | path | Server ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server refreshed successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /auth/plugin/datasource/default-list + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/auth/plugin/datasource/list + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialPayload](#datasourcecredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCustomClientPayload](#datasourcecustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/default + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceDefaultPayload](#datasourcedefaultpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/delete + +#### POST 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialDeletePayload](#datasourcecredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialUpdatePayload](#datasourcecredentialupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update-name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceUpdateNamePayload](#datasourceupdatenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/invoices + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/partners/{partner_key}/tenants + +#### PUT +##### Description + +Sync partner tenants bindings + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PartnerTenantsPayload](#partnertenantspayload) | +| partner_key | path | Partner key | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tenants synced to partner successfully | +| 400 | Invalid partner information | + +### /billing/subscription + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /code-based-extension + +#### GET +##### Description + +Get code-based extension data by module name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| module | query | Extension module name | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [CodeBasedExtensionResponse](#codebasedextensionresponse) | + +### /compliance/download + +#### GET +##### Description + +Get compliance document download link + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ComplianceDownloadQuery](#compliancedownloadquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates/{binding_id}/{action} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets + +#### GET +##### Description + +Get list of datasets + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
ids | query | Filter by dataset IDs (list) | No | string | +| include_all | query | Include all datasets (default: false) | No | string | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| tag_ids | query | Filter by tag IDs (list) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | + +#### POST +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Dataset created successfully | +| 400 | Invalid request parameters | + +### /datasets/api-base-info + +#### GET +##### Description + +Get dataset API base information + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | API base info retrieved successfully | + +### /datasets/api-keys + +#### GET +##### Description + +Get dataset API keys + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/api-keys/{api_key_id} + +#### DELETE +##### Description + +Delete dataset API key + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### 
/datasets/batch_import_status/{job_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external + +#### POST +##### Description + +Create external knowledge dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalDatasetCreatePayload](#externaldatasetcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | External dataset created successfully | [DatasetDetail](#datasetdetail) | +| 400 | Invalid parameters | | +| 403 | Permission denied | | + +### /datasets/external-knowledge-api + +#### GET +##### Description + +Get external knowledge API templates + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API templates retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get external knowledge API template details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API template retrieved successfully | +| 404 | Template not found | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id}/use-check + +#### GET +##### Description + +Check if external knowledge API is being used + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Usage check completed successfully | + +### /datasets/indexing-estimate + +#### POST +##### Description + +Estimate dataset indexing cost + +##### Parameters + +| Name | Located in | Description | Required 
| Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IndexingEstimatePayload](#indexingestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | + +### /datasets/init + +#### POST +##### Description + +Initialize dataset with documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Dataset initialized successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | +| 400 | Invalid request parameters | | + +### /datasets/metadata/built-in + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/notion-indexing-estimate + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/process-rule + +#### GET +##### Description + +Get dataset document processing rules + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| document_id | query | Document ID (optional) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Process rules retrieved successfully | + +### /datasets/retrieval-setting + +#### GET +##### Description + +Get dataset retrieval settings + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Retrieval settings 
retrieved successfully | + +### /datasets/retrieval-setting/{vector_type} + +#### GET +##### Description + +Get mock dataset retrieval settings by vector type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| vector_type | path | Vector store type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Mock retrieval settings retrieved successfully | + +### /datasets/{dataset_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset retrieved successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +#### PATCH +##### Description + +Update dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset updated successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/api-keys/{status} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | 
path | | Yes | string | +| status | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/auto-disable-logs + +#### GET +##### Description + +Get dataset auto disable logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Auto disable logs retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/batch/{batch}/indexing-estimate + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/batch/{batch}/indexing-status + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| fetch | query | Fetch full details (default: false) | No | string | +| keyword | query | 
Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| sort | query | Sort order (default: -created_at) | No | string | +| status | query | Filter documents by display status | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Documents created successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Summary + +Stream a ZIP archive containing the requested uploaded documents + +##### Description + +Download selected dataset documents as a single ZIP archive (upload-file only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/generate-summary + +#### POST +##### Summary + +Generate summary index for specified documents + +##### Description + +Generate summary index for documents +This endpoint checks if the dataset configuration supports summary generation +(indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), +then asynchronously generates summary indexes for the provided documents. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [GenerateSummaryPayload](#generatesummarypayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary generation started successfully | +| 400 | Invalid request or dataset configuration | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataOperationData](#metadataoperationdata) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/status/{action}/batch + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get document details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| metadata | query | Metadata inclusion (all/only/without) 
| No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a dataset document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-estimate + +#### GET +##### Description + +Estimate document indexing cost + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | +| 400 | Document already finished | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-status + +#### GET +##### Description + +Get document indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/metadata + +#### PUT +##### Description + +Update document metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body 
| | Yes | [DocumentMetadataUpdatePayload](#documentmetadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document metadata updated successfully | +| 403 | Permission denied | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/pipeline-execution-log + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/pause + +#### PATCH +##### Summary + +pause document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/resume + +#### PATCH +##### Summary + +resume document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/datasets/{dataset_id}/documents/{document_id}/processing/{action} + +#### PATCH +##### Description + +Update document processing status (pause/resume) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform (pause/resume) | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Processing status updated successfully | +| 400 | Invalid action | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/rename + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRenamePayload](#documentrenamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Document renamed successfully | [DocumentResponse](#documentresponse) | + +### /datasets/{dataset_id}/documents/{document_id}/segment + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segment/{action} + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/batch_import + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path 
| | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/summary-status + +#### GET +##### Summary + +Get summary index generation status for a document + +##### Description + +Get summary index generation status for a document +Returns: +- total_segments: Total number of segments in the document +- summary_status: Dictionary with status counts + - completed: Number of summaries completed + - generating: Number of summaries being generated + - error: Number of summaries with errors + - not_started: Number of segments without summary records +- summaries: List of summary records with status and content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/website-sync + +#### GET +##### Summary + +sync website document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/error-docs + +#### GET +##### Description + +Get dataset error documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Error documents retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/external-hit-testing + +#### POST +##### Description + +Test external knowledge retrieval for dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalHitTestingPayload](#externalhittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External hit testing completed successfully | +| 400 | Invalid parameters | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Description + +Test dataset knowledge retrieval + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit testing completed successfully | [HitTestingResponse](#hittestingresponse) | +| 400 | Invalid parameters | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/indexing-status + +#### GET +##### Description + +Get dataset indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/permission-part-users + +#### GET +##### Description + +Get dataset permission user list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Permission users retrieved successfully | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/queries + +#### GET +##### Description + +Get dataset query history + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Query history retrieved successfully | [DatasetQueryDetail](#datasetquerydetail) | + +### /datasets/{dataset_id}/related-apps + +#### GET +##### Description + +Get applications related to dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Related apps retrieved successfully | [RelatedAppList](#relatedapplist) | + +### /datasets/{dataset_id}/retry + +#### POST +##### Summary + +retry document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRetryPayload](#documentretrypayload) | + +##### Responses + +| Code 
| Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/use-check + +#### GET +##### Description + +Check if dataset is in use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset use status retrieved successfully | + +### /datasets/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for a dataset + +##### Description + +Get all API keys for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for a dataset + +##### Description + +Create a new API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for a dataset + +##### Description + +Delete an API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + 
+### /email-code-login + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-code-login/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginPayload](#emailcodeloginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/send-email + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/validity + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /explore/apps + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RecommendedAppsQuery](#recommendedappsquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [RecommendedAppListResponse](#recommendedapplistresponse) | + +### /explore/apps/{app_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /features + +#### GET +##### Summary + +Get feature configuration for current tenant + +##### Description + +Get feature configuration for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | 
------ | +| 200 | Success | [FeatureResponse](#featureresponse) | + +### /files/support-type + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /files/upload + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [UploadConfig](#uploadconfig) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | + +### /files/{file_id}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| file_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Email sent successfully | [ForgotPasswordEmailResponse](#forgotpasswordemailresponse) | +| 400 | Invalid email or rate limit exceeded | | + +### /forgot-password/resets + +#### POST +##### Description + +Reset password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Password reset successfully | [ForgotPasswordResetResponse](#forgotpasswordresetresponse) | +| 400 | Invalid token or password mismatch | | + +### /forgot-password/validity + +#### POST +##### 
Description + +Verify password reset code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Code verified successfully | [ForgotPasswordCheckResponse](#forgotpasswordcheckresponse) | +| 400 | Invalid code or token | | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by form token + +##### Description + +GET /console/api/form/human_input/{form_token} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by form token + +##### Description + +POST /console/api/form/human_input/{form_token} + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /info + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /installed-apps + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [InstalledAppListResponse](#installedapplistresponse) | + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | 
Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionMessageExplorePayload](#completionmessageexplorepayload) | + +##### Responses 
+ +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/pin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | 
path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/unpin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/feedbacks + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/more-like-this + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MoreLikeThisQuery](#morelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | Description | 
Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/meta + +#### GET +##### Summary + +Get app meta + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageListQuery](#savedmessagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageCreatePayload](#savedmessagecreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages/{message_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /instruction-generate + +#### POST +##### Description + +Generate instruction for workflow nodes or general use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionGeneratePayload](#instructiongeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Instruction generated successfully | +| 400 | Invalid request parameters or flow/workflow not found | +| 402 | Provider quota 
exceeded | + +### /instruction-generate/template + +#### POST +##### Description + +Get instruction generation template + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionTemplatePayload](#instructiontemplatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Template retrieved successfully | +| 400 | Invalid request parameters | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /logout + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /mcp/oauth/callback + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notification + +#### GET +##### Description + +Return the active in-product notification for the current user in their interface language (falls back to English if unavailable). The notification is NOT marked as seen here; call POST /notification/dismiss when the user explicitly closes the modal. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success — inspect should_show to decide whether to render the modal | +| 401 | Unauthorized | + +### /notification/dismiss + +#### POST +##### Description + +Mark a notification as dismissed for the current user. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 401 | Unauthorized | + +### /notion/pages/{page_id}/{page_type}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notion/pre-import/pages + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/authorize/{provider} + +#### GET +##### Description + +Handle OAuth callback and complete login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| state | query | Optional state parameter (used for invite token) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with access token | +| 400 | OAuth process failed | + +### /oauth/data-source/binding/{provider} + +#### GET +##### Description + +Bind OAuth data source with authorization code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code 
from OAuth provider | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source binding success | [OAuthDataSourceBindingResponse](#oauthdatasourcebindingresponse) | +| 400 | Invalid provider or code | | + +### /oauth/data-source/callback/{provider} + +#### GET +##### Description + +Handle OAuth callback from data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| error | query | Error message from OAuth provider | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with result | +| 400 | Invalid provider | + +### /oauth/data-source/{provider} + +#### GET +##### Description + +Get OAuth authorization URL for data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Authorization URL or internal setup success | [OAuthDataSourceResponse](#oauthdatasourceresponse) | +| 400 | Invalid provider | | +| 403 | Admin privileges required | | + +### /oauth/data-source/{provider}/{binding_id}/sync + +#### GET +##### Description + +Sync data from OAuth data source + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | Data source binding ID | Yes | string | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source sync success 
| [OAuthDataSourceSyncResponse](#oauthdatasourcesyncresponse) | +| 400 | Invalid provider or sync failed | | + +### /oauth/login/{provider} + +#### GET +##### Description + +Initiate OAuth login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| invite_token | query | Optional invitation token | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to OAuth authorization URL | +| 400 | Invalid provider | + +### /oauth/plugin/{provider_id}/datasource/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider_id}/datasource/get-authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/trigger/callback + +#### 
GET +##### Summary + +Handle OAuth callback for trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/account + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/authorize + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/customized/templates/{template_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/dataset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineDatasetImportPayload](#ragpipelinedatasetimportpayload) | 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/empty-dataset + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates/{template_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/datasource-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineImportPayload](#ragpipelineimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{pipeline_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/recommended-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/rag/pipelines/transform/datasets/{dataset_id} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/customized/publish + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Payload](#payload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/exports + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id} + +#### GET +##### Summary + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config by block type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| block_type | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft + +#### GET +##### Summary + +Get draft rag pipeline's 
workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Sync draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/variables-inspect + +#### POST +##### Summary + +Set datasource variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceVariablesPayload](#datasourcevariablespayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/environment-variables + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/rag/pipelines/{pipeline_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunRequiredPayload](#noderunrequiredpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/system-variables + +#### GET +##### Parameters + +| 
Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/publish + +#### GET +##### Summary + +Get published pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/preview + +#### POST +##### Summary + +Run datasource content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/run + +#### POST +##### Summary + +Run published workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [PublishedWorkflowRunPayload](#publishedworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete a published workflow version that is not currently active on the pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id}/restore + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /refresh-token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/{url} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /reset-password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rule-code-generate + +#### POST +##### Description + +Generate code rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleCodeGeneratePayload](#rulecodegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Code rules generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-generate + +#### POST +##### Description + +Generate rule configuration using 
LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleGeneratePayload](#rulegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Rule configuration generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-structured-output-generate + +#### POST +##### Description + +Generate structured output rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleStructuredOutputPayload](#rulestructuredoutputpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Structured output generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /spec/schema-definitions + +#### GET +##### Summary + +Get system JSON Schema definitions specification + +##### Description + +Used for frontend component type mapping + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /system-features + +#### GET +##### Summary + +Get system-wide feature configuration + +##### Description + +Get system-wide feature configuration +NOTE: This endpoint is unauthenticated by design, as it provides system features +data required for dashboard initialization. + +Authentication would create circular dependency (can't login without dashboard loading). + +Only non-sensitive configuration data should be returned by this endpoint. 
+ +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [SystemFeatureResponse](#systemfeatureresponse) | + +### /tag-bindings + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tag-bindings/remove + +#### POST +##### Description + +Remove one or more tag bindings from a target. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingRemovePayload](#tagbindingremovepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword for tag name. | No | string | +| type | query | Tag type filter. Can be "knowledge" or "app". 
| No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ [TagResponse](#tagresponse) ] | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags/{tag_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /test/retrieval + +#### POST +##### Description + +Bedrock retrieval test (internal use only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [BedrockRetrievalPayload](#bedrockretrievalpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Bedrock retrieval test completed | + +### /trial-apps/{app_id} + +#### GET +##### Summary + +Get app detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ChatRequest](#chatrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionRequest](#completionrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/datasets + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/trial-apps/{app_id}/site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Returns the site configuration for the application including theme, icons, and text. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [TextToSpeechRequest](#texttospeechrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows + +#### GET +##### Summary + +Get workflow detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunRequest](#workflowrunrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /website/crawl + +#### POST +##### Description + +Crawl website content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlPayload](#websitecrawlpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Website crawl initiated successfully | +| 400 | Invalid crawl parameters | + +### /website/crawl/status/{job_id} + +#### GET +##### Description + +Get website crawl status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlStatusQuery](#websitecrawlstatusquery) | +| job_id | path | Crawl job ID | Yes | string | +| provider | query | Crawl provider (firecrawl/watercrawl/jinareader) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Crawl status retrieved successfully | +| 400 | Invalid provider | +| 404 | Crawl job not found | + +### /workflow/{workflow_run_id}/events + +#### GET +##### Summary + +Get workflow execution events stream after resume + +##### Description + +GET /console/api/workflow/{workflow_run_id}/events + +Returns Server-Sent Events stream. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workflow/{workflow_run_id}/pause-details + +#### GET +##### Summary + +Get workflow pause details + +##### Description + +GET /console/api/workflow/{workflow_run_id}/pause-details + +Returns information about why and where the workflow is paused. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /workspaces/current/agent-provider/{provider_name} + +#### GET +##### Description + +Get specific agent provider details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_name | path | Agent provider name | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | object | + +### /workspaces/current/agent-providers + +#### GET +##### Description + +Get list of available agent providers + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ object ] | + +### /workspaces/current/dataset-operators + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/default-model + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGetDefault](#parsergetdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[ParserPostDefault](#parserpostdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/endpoints + +#### POST +##### Description + +Create a new plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/create + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a plugin endpoint. Use POST /workspaces/current/endpoints instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/delete + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for deleting a plugin endpoint. Use DELETE /workspaces/current/endpoints/{id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/disable + +#### POST +##### Description + +Disable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint disabled successfully | [EndpointDisableResponse](#endpointdisableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/enable + +#### POST +##### Description + +Enable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint enabled successfully | [EndpointEnableResponse](#endpointenableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/list + +#### GET +##### Description + +List plugin endpoints with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListQuery](#endpointlistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EndpointListResponse](#endpointlistresponse) | + +### 
/workspaces/current/endpoints/list/plugin + +#### GET +##### Description + +List endpoints for a specific plugin + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListForPluginQuery](#endpointlistforpluginquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [PluginEndpointListResponse](#pluginendpointlistresponse) | + +### /workspaces/current/endpoints/update + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating a plugin endpoint. Use PATCH /workspaces/current/endpoints/{id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LegacyEndpointUpdatePayload](#legacyendpointupdatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/{id} + +#### DELETE +##### Description + +Delete a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Endpoint ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +#### PATCH +##### Description + +Update a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointUpdatePayload](#endpointupdatepayload) | +| id | path | Endpoint ID | Yes | string | + +##### Responses + 
+| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/members + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/members/invite-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MemberInvitePayload](#memberinvitepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/owner-transfer-check + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferCheckPayload](#ownertransfercheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/send-owner-transfer-confirm-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferEmailPayload](#ownertransferemailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/owner-transfer + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [OwnerTransferPayload](#ownertransferpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/update-role + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [MemberRoleUpdatePayload](#memberroleupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserModelList](#parsermodellist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/checkout-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialDelete](#parsercredentialdelete) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserCredentialId](#parsercredentialid) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialCreate](#parsercredentialcreate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialUpdate](#parsercredentialupdate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialSwitch](#parsercredentialswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialValidate](#parsercredentialvalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPostModels](#parserpostmodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteCredential](#parserdeletecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserGetCredentials](#parsergetcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCreateCredential](#parsercreatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | 
-------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserUpdateCredential](#parserupdatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserSwitch](#parserswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserValidate](#parservalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/disable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/enable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/credentials-validate + +#### 
POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/{config_id}/credentials-validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| config_id | path | | Yes | string | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/parameter-rules + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserParameter](#parserparameter) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/preferred-provider-type + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPreferredProviderType](#parserpreferredprovidertype) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/permission + +#### GET +##### Summary + +Get workspace permission settings + +##### Description + +Returns permission flags that control workspace features like member invitations and owner transfer. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/asset + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserAsset](#parserasset) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/debugging-key + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/fetch-manifest + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserIcon](#parsericon) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubInstall](#parsergithubinstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/marketplace + +#### POST 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/pkg + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserList](#parserlist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/installations/ids + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/latest-versions + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/marketplace/pkg + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptions](#parserdynamicoptions) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options-with-credentials + +#### POST +##### Summary + +Fetch dynamic options using credentials directly (for edit mode) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptionsWithCredentials](#parserdynamicoptionswithcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPermissionChange](#parserpermissionchange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/autoupgrade/exclude + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserExcludePlugin](#parserexcludeplugin) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPreferencesChange](#parserpreferenceschange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/readme + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserReadme](#parserreadme) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserTasks](#parsertasks) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/delete_all + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete/{identifier} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| identifier | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/uninstall + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserUninstall](#parseruninstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpgrade](#parsergithubupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/marketplace + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserMarketplaceUpgrade](#parsermarketplaceupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/bundle + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpload](#parsergithubupload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/pkg + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-labels + +#### GET +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderAddPayload](#apitoolprovideraddpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderDeletePayload](#apitoolproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/remote + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/schema + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolSchemaPayload](#apitoolschemapayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/test/pre + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolTestPayload](#apitooltestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + 
+### /workspaces/current/tool-provider/api/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderUpdatePayload](#apitoolproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolAddPayload](#builtintooladdpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/schema/{credential_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| credential_type | path | | Yes | string | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credentials + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/default-credential + +#### POST +##### 
Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinProviderDefaultCredentialPayload](#builtinproviderdefaultcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolCredentialDeletePayload](#builtintoolcredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/client-schema + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ToolOAuthCustomClientPayload](#tooloauthcustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/tools + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolUpdatePayload](#builtintoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderDeletePayload](#mcpproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | 
Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderCreatePayload](#mcpprovidercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderUpdatePayload](#mcpproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/auth + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPAuthPayload](#mcpauthpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/tools/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/update/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/create + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolCreatePayload](#workflowtoolcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success 
| + +### /workspaces/current/tool-provider/workflow/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolDeletePayload](#workflowtooldeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolUpdatePayload](#workflowtoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-providers + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/api + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/builtin + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/mcp + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/workflow + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + 
+##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/info + +#### GET +##### Summary + +Get info for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/oauth/client + +#### DELETE +##### Summary + +Remove custom OAuth client configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Summary + +Get OAuth client configuration for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Configure custom OAuth client for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerOAuthClientPayload](#triggeroauthclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/build/{subscription_builder_id} + +#### POST +##### Summary + +Build a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | 
string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/create + +#### POST +##### Summary + +Add a new subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderCreatePayload](#triggersubscriptionbuildercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/logs/{subscription_builder_id} + +#### GET +##### Summary + +Get the request logs for a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/update/{subscription_builder_id} + +#### POST +##### Summary + +Update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/workspaces/current/trigger-provider/{provider}/subscriptions/builder/verify-and-update/{subscription_builder_id} + +#### POST +##### Summary + +Verify and update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/{subscription_builder_id} + +#### GET +##### Summary + +Get a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/list + +#### GET +##### Summary + +List all trigger subscriptions for the current tenant's provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/oauth/authorize + +#### GET +##### Summary + +Initiate OAuth authorization flow for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/verify/{subscription_id} + +#### POST +##### Summary + +Verify credentials for an existing subscription (edit mode only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/delete + +#### POST +##### Summary + +Delete a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/update + +#### POST +##### Summary + +Update a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/triggers + +#### GET +##### Summary + +List all trigger providers for the current tenant + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkspaceCustomConfigPayload](#workspacecustomconfigpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config/webapp-logo/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/info + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceInfoPayload](#workspaceinfopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SwitchWorkspacePayload](#switchworkspacepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/{tenant_id}/model-providers/{provider}/{icon_type}/{lang} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| icon_type | path | | Yes | string | +| lang | path | | Yes | string | +| provider | path | | Yes | string | +| tenant_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +## default +Default namespace + +### /explore/banners + +#### GET +##### Summary + +Get banner list + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### APIBasedExtensionListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| APIBasedExtensionListResponse | array | | | + +#### APIBasedExtensionPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | API endpoint URL | Yes | +| 
api_key | string | API key for authentication | Yes | +| name | string | Extension name | Yes | + +#### APIBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | | Yes | +| api_key | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| name | string | | Yes | + +#### Account + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| interface_language | | | No | +| interface_theme | | | No | +| is_password_set | boolean | | Yes | +| last_login_at | | | No | +| last_login_ip | | | No | +| name | string | | Yes | +| timezone | | | No | + +#### AccountAvatarPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | | Yes | + +#### AccountAvatarQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | Avatar file ID | Yes | + +#### AccountDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### AccountDeletionFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| feedback | string | | Yes | + +#### AccountInitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | +| invitation_code | | | No | +| timezone | string | | Yes | + +#### AccountIntegrateListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AccountIntegrateResponse](#accountintegrateresponse) ] | | Yes | + +#### AccountIntegrateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| is_bound | 
boolean | | Yes | +| link | | | No | +| provider | string | | Yes | + +#### AccountInterfaceLanguagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | + +#### AccountInterfaceThemePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_theme | string | *Enum:* `"dark"`, `"light"` | Yes | + +#### AccountNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### AccountPasswordPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password | | | No | +| repeat_new_password | string | | Yes | + +#### AccountTimezonePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| timezone | string | | Yes | + +#### AccountWithRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| last_active_at | | | No | +| last_login_at | | | No | +| name | string | | Yes | +| role | string | | Yes | +| status | string | | Yes | + +#### AccountWithRoleList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| accounts | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### ActivateCheckQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| interface_language | string | | Yes | +| name | string | | Yes | +| timezone | string | | Yes | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivationCheckResponse + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| data | | Activation data if valid | No | +| is_valid | boolean | Whether token is valid | Yes | + +#### ActivationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### AdvancedChatWorkflowRunForList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | | No | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| elapsed_time | number | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| id | string | | No | +| message_id | string | | No | +| retry_index | integer | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### AdvancedChatWorkflowRunPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AdvancedChatWorkflowRunForList](#advancedchatworkflowrunforlist) ] | | No | +| has_more | boolean | | No | +| limit | integer | | No | + +#### AdvancedChatWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | | | No | +| parent_message_id | | | No | +| query | string | | No | + +#### AdvancedPromptTemplateQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_mode | string | Application mode | Yes | +| has_context | string | Whether has context | No | +| model_mode | string | Model mode | Yes | +| model_name | string | Model name | Yes | + +#### AgentLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| message_id | string | Message UUID | Yes | + +#### AgentThought + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| chain_id | | | No | +| created_at | | | No | +| files | [ string ] | | Yes | +| id | string | | Yes | +| message_chain_id | | | No | +| message_id | string | | Yes | +| observation | | | No | +| position | integer | | Yes | +| thought | | | No | +| tool | | | No | +| tool_input | | | No | +| tool_labels | [JSONValue](#jsonvalue) | | Yes | + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCountResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| count | integer | Number of annotations | Yes | + +#### AnnotationExportList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | + +#### AnnotationFilePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | Message ID | Yes | + +#### AnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_content | | | No | +| annotation_question | | | No | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | +| score | | | No | +| source | | | No | + +#### AnnotationHitHistoryList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AnnotationHitHistory](#annotationhithistory) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + 
+#### AnnotationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | string | Search keyword | No | +| limit | integer | Page size | No | +| page | integer | Page number | No | + +#### AnnotationReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### AnnotationReplyStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | *Enum:* `"disable"`, `"enable"` | Yes | + +#### AnnotationSettingUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Score threshold | Yes | + +#### ApiKeyAuthBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| credentials | object | | Yes | +| provider | string | | Yes | + +#### ApiKeyItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| last_used_at | | | No | +| token | string | | Yes | +| type | string | | Yes | + +#### ApiKeyList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ApiKeyItem](#apikeyitem) ] | | Yes | + +#### ApiProviderSchemaType + +Enum class for api provider schema type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ApiProviderSchemaType | string | Enum class for api provider schema type. 
| | + +#### ApiToolProviderAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | | Yes | + +#### ApiToolProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| original_provider | string | | Yes | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolSchemaPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| schema | string | | Yes | + +#### ApiToolTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| parameters | object | | Yes | +| provider_name | | | No | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | +| tool_name | string | | Yes | + +#### AppApiStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_api | boolean | Enable or disable API | Yes | + +#### AppDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| 
icon_background | | | No | +| id | string | | Yes | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppDetailKernel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| mode | string | | No | +| name | string | | No | + +#### AppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| api_base_url | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| deleted_tools | [ [DeletedTool](#deletedtool) ] | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| site | | | No | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | boolean | Include secrets in export | No | +| workflow_id | | Specific workflow ID to export | No | + +#### AppExportResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | | Yes | + +#### AppIconPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | Icon data | No | +| icon_background | | Icon background color | No | +| 
icon_type | | Icon type | No | + +#### AppImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | Import mode | Yes | +| name | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### AppListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_created_by_me | | Filter by creator | No | +| limit | integer | Page size (1-100) | No | +| mode | string | App mode filter
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"all"`, `"channel"`, `"chat"`, `"completion"`, `"workflow"` | No | +| name | | Filter by app name | No | +| page | integer | Page number (1-99999) | No | +| tag_ids | | Filter by tag IDs | No | + +#### AppMCPServerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | +| parameters | | | Yes | +| server_code | string | | Yes | +| status | [AppMCPServerStatus](#appmcpserverstatus) | | Yes | +| updated_at | | | No | + +#### AppMCPServerStatus + +AppMCPServer Status Enum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| AppMCPServerStatus | string | AppMCPServer Status Enum | | + +#### AppNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Name to check | Yes | + +#### AppPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [AppPartial](#apppartial) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### AppPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| author_name | | | No | +| create_user_name | | | No | +| created_at | | | No | +| created_by | | | No | +| desc_or_prompt | | | No | +| has_draft_trigger | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppSiteResponse + +| Name | Type | Description | Required | 
+| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| code | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | string | | Yes | +| default_language | string | | Yes | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| privacy_policy | | | No | +| prompt_public | boolean | | Yes | +| show_workflow_steps | boolean | | Yes | +| title | string | | Yes | +| use_icon_as_answer_icon | boolean | | Yes | + +#### AppSiteStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_site | boolean | Enable or disable site | Yes | + +#### AppSiteUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| use_icon_as_answer_icon | | | No | + +#### AppTracePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | Enable or disable tracing | Yes | +| tracing_provider | | Tracing provider | No | + +#### AudioTranscriptResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| text | string | Transcribed text from audio | Yes | + +#### BatchAddNotificationAccountsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notification_id | string | | Yes | +| user_email | [ string ] | List of account email addresses | Yes | + +#### BatchImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | 
----------- | -------- | +| upload_file_id | string | | Yes | + +#### BedrockRetrievalPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| knowledge_id | string | | Yes | +| query | string | | Yes | +| retrieval_setting | [BedrockRetrievalSetting](#bedrockretrievalsetting) | | Yes | + +#### BedrockRetrievalSetting + +Retrieval settings for Amazon Bedrock knowledge base queries. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Minimum relevance score threshold | No | +| top_k | | Maximum number of results to retrieve | No | + +#### BuiltinProviderDefaultCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### BuiltinToolAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | +| type | [CredentialType](#credentialtype) | | Yes | + +#### BuiltinToolCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### BuiltinToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### ButtonStyle + +Button styles for user actions. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ButtonStyle | string | Button styles for user actions. 
| | + +#### ChangeEmailResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_email | string | | Yes | +| token | string | | Yes | + +#### ChangeEmailSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | +| phase | | | No | +| token | | | No | + +#### ChangeEmailValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ChatConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| sort_by | string | Sort field and direction
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query | Yes | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### ChatMessagesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### ChatRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | object | | Yes | +| parent_message_id | | | No | +| query | string | | Yes | +| retriever_from | string | | No | + +#### CheckDependenciesResult + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [PluginDependency](#plugindependency) ] | | No | + +#### CheckEmailUniquePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | + +#### ChildChunkBatchUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunks | [ [ChildChunkUpdateArgs](#childchunkupdateargs) ] | | Yes | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | +| id | | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CodeBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | | Extension data | Yes | +| module | string | Module name | Yes | + +#### CompletionConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | 
string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### CompletionMessageExplorePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| query | string | Query text | No | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### CompletionRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### ComplianceDownloadQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_name | string | Compliance document name | Yes | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConsoleDatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ids | [ string ] | Filter by dataset IDs | No | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### Conversation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotation | | | No | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| read_at | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationAnnotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| account | | | No | +| content | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | + +#### ConversationAnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_create_account | | | No | +| created_at | | | No | +| id | string | | Yes | + +#### ConversationDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| introduction | | | No | +| message_count | integer | | Yes | +| model_config | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | + +#### ConversationMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| status | string | | Yes | + +#### ConversationPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [Conversation](#conversation) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | 
string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | Conversation variables for the draft workflow | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID to filter variables | Yes | + +#### ConversationWithSummary + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| message_count | integer | | Yes | +| model_config | | | No | +| name | string | | Yes | +| read_at | | | No | +| status | string | | Yes | +| status_count | | | No | +| summary_or_query | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationWithSummaryPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [ConversationWithSummary](#conversationwithsummary) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConvertToWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background 
| | | No | +| icon_type | | | No | +| name | | | No | + +#### CopyAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Description for the copied app | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| name | | Name for the copied app | No | + +#### CreateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | Annotation reply data | No | +| answer | | Answer text | No | +| content | | Content text | No | +| message_id | | Message ID | No | +| question | | Question text | No | + +#### CreateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| mode | string | App mode
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"chat"`, `"completion"`, `"workflow"` | Yes | +| name | string | App name | Yes | + +#### CredentialType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| CredentialType | string | | | + +#### DataSource + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| info_list | [InfoList](#infolist) | | Yes | + +#### DataSourceIntegrate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| disabled | boolean | | No | +| id | string | | No | +| is_bound | boolean | | No | +| link | string | | No | +| provider | string | | No | +| source_info | [DataSourceIntegrateWorkspace](#datasourceintegrateworkspace) | | No | + +#### DataSourceIntegrateIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | string | | No | +| type | string | | No | +| url | string | | No | + +#### DataSourceIntegrateList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [DataSourceIntegrate](#datasourceintegrate) ] | | No | + +#### DataSourceIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### DataSourceIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [DataSourceIntegratePage](#datasourceintegratepage) ] | | No | +| total | integer | | No | +| workspace_icon | string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### DatasetAndDocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| batch | string | | Yes | +| dataset | 
[DatasetResponse](#datasetresponse) | | Yes | +| documents | [ [DocumentResponse](#documentresponse) ] | | Yes | + +#### DatasetBase + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| id | string | | No | +| indexing_technique | string | | No | +| name | string | | No | +| permission | string | | No | + +#### DatasetContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| content_type | string | | No | +| file_info | [DatasetFileInfo](#datasetfileinfo) | | No | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | + +#### DatasetDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_count | integer | | No | +| author_name | string | | No | +| built_in_field_enabled | boolean | | No | +| chunk_structure | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| doc_form | string | | No | +| doc_metadata | [ [DatasetDocMetadata](#datasetdocmetadata) ] | | No | +| document_count | integer | | No | +| embedding_available | boolean | | No | +| embedding_model | string | | No | +| embedding_model_provider | string | | No | +| enable_api | boolean | | No | +| external_knowledge_info | [ExternalKnowledgeInfo](#externalknowledgeinfo) | | No | +| external_retrieval_model | [ExternalRetrievalModel](#externalretrievalmodel) | | No | +| icon_info | [DatasetIconInfo](#dataseticoninfo) | | No | +| id | string | | No | +| 
indexing_technique | string | | No | +| is_multimodal | boolean | | No | +| is_published | boolean | | No | +| name | string | | No | +| permission | string | | No | +| pipeline_id | string | | No | +| provider | string | | No | +| retrieval_model_dict | [DatasetRetrievalModel](#datasetretrievalmodel) | | No | +| runtime_mode | string | | No | +| summary_index_setting | [_AnonymousInlineModel_b1954337d565](#_anonymousinlinemodel_b1954337d565) | | No | +| tags | [ [Tag](#tag) ] | | No | +| total_available_documents | integer | | No | +| total_documents | integer | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| word_count | integer | | No | + +#### DatasetDocMetadata + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### DatasetFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | string | | No | +| id | string | | No | +| mime_type | string | | No | +| name | string | | No | +| size | integer | | No | +| source_url | string | | No | + +#### DatasetIconInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | string | | No | + +#### DatasetKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetQueryDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| created_by_role | string | | No | +| id | string | | No | +| queries | [DatasetContent](#datasetcontent) | | No | +| source | string 
| | No | +| source_app_id | string | | No | + +#### DatasetRerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | string | | No | +| reranking_provider_name | string | | No | + +#### DatasetResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| data_source_type | | | No | +| description | | | No | +| id | string | | Yes | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | + +#### DatasetRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_enable | boolean | | No | +| reranking_mode | string | | No | +| reranking_model | [DatasetRerankingModel](#datasetrerankingmodel) | | No | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| search_method | string | | No | +| top_k | integer | | No | +| weights | [DatasetWeightedScore](#datasetweightedscore) | | No | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| icon_info | | | No | +| indexing_technique | | | No | +| is_multimodal | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### DatasetVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | No | +| embedding_provider_name | string | | No | +| vector_weight | number | | No | + +#### DatasetWeightedScore + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | 
[DatasetKeywordSetting](#datasetkeywordsetting) | | No | +| vector_setting | [DatasetVectorSetting](#datasetvectorsetting) | | No | +| weight_type | string | | No | + +#### DatasourceCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### DatasourceCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### DatasourceCredentialUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### DatasourceCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### DatasourceDefaultPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | + +#### DatasourceUpdateNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| name | string | | Yes | + +#### DatasourceVariablesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info | object | | Yes | +| datasource_type | string | | Yes | +| start_node_id | string | | Yes | +| start_node_title | string | | Yes | + +#### DebugPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DebugPermission | string | | | + +#### DefaultBlockConfigQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| q | | | No | 
+ +#### DeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | Yes | +| tool_name | string | | Yes | +| type | string | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentMetadataResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | +| value | | | No | + +#### DocumentMetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_metadata | | | No | +| doc_type | | | No | + +#### DocumentRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### DocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| 
summary_index_status | | | No | +| tokens | | | No | +| word_count | | | No | + +#### DocumentRetryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string ] | | Yes | + +#### DocumentWithSegmentsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| completed_segments | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| process_rule_dict | | | No | +| summary_index_status | | | No | +| tokens | | | No | +| total_segments | | | No | +| word_count | | | No | + +#### DraftWorkflowNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | + +#### DraftWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| start_node_id | string | | Yes | + +#### DraftWorkflowSyncPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | | | No | +| environment_variables | | | No | +| features | | | No | +| graph | object | | Yes | +| hash | | | No | +| rag_pipeline_variables | | | No | + +#### DraftWorkflowTriggerRunAllPayload + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| node_ids | [ string ] | | Yes | + +#### DraftWorkflowTriggerRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### DraftWorkflowTriggerRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | Node ID | Yes | + +#### EducationActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| institution | string | | Yes | +| role | string | | Yes | +| token | string | | Yes | + +#### EducationAutocompleteQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keywords | string | | Yes | +| limit | integer | | No | +| page | integer | | No | + +#### EducationAutocompleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| curr_page | | | No | +| data | [ string ] | | No | +| has_next | | | No | + +#### EducationStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_refresh | | | No | +| expire_at | | | No | +| is_student | | | No | +| result | | | No | + +#### EducationVerifyResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | | | No | + +#### EmailCodeLoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| language | | | No | +| token | string | | Yes | + +#### EmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### EmailRegisterResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### 
EmailRegisterSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| language | | Language code | No | + +#### EmailRegisterValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### EndpointCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| settings | object | | Yes | + +#### EndpointCreateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDeleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDisableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointEnableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointIdPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | + +#### EndpointListForPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | +| plugin_id | string | | Yes | + +#### EndpointListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | + +#### EndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### 
EndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### EndpointUpdateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EnvironmentVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| environment_variables | [ object ] | Environment variables for the draft workflow | Yes | + +#### ExecutionContentType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ExecutionContentType | string | | | + +#### ExternalApiTemplateListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | + +#### ExternalDatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| external_knowledge_api_id | string | | Yes | +| external_knowledge_id | string | | Yes | +| external_retrieval_model | | | No | +| name | string | | Yes | + +#### ExternalHitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_retrieval_model | | | No | +| metadata_filtering_conditions | | | No | +| query | string | | Yes | + +#### ExternalKnowledgeApiPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### ExternalKnowledgeInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_knowledge_api_endpoint | string | | No | +| external_knowledge_api_id | string | | No | +| external_knowledge_api_name | string | | No | +| external_knowledge_id | string | | No | + +#### 
ExternalRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| top_k | integer | | No | + +#### FeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Feature configuration object | No | + +#### Feedback + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| from_account | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| rating | string | | Yes | + +#### FeedbackExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end_date | | End date (YYYY-MM-DD) | No | +| format | string | Export format
*Enum:* `"csv"`, `"json"` | No | +| from_source | | Filter by feedback source | No | +| has_comment | | Only include feedback with comments | No | +| rating | | Filter by rating | No | +| start_date | | Start date (YYYY-MM-DD) | No | + +#### FeedbackStat + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dislike | integer | | Yes | +| like | integer | | Yes | + +#### FileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_ids | [ string ] | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordCheckResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| is_valid | boolean | Whether code is valid | Yes | +| token | string | New reset token | Yes | + +#### ForgotPasswordEmailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | | Error code if account not found | No | +| data | | Reset token | No | +| result | string | Operation result | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetResponse + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### FormInput + +Form input definition. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| default | | | No | +| output_variable_name | string | | Yes | +| type | [FormInputType](#forminputtype) | | Yes | + +#### FormInputDefault + +Default configuration for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| selector | [ string ] | | No | +| type | [PlaceholderType](#placeholdertype) | | Yes | +| value | string | | No | + +#### FormInputType + +Form input types. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| FormInputType | string | Form input types. | | + +#### GenerateSummaryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_list | [ string ] | | Yes | + +#### Github + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| github_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### HitTestingChildChunk + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| id | | | No | +| position | | | No | +| score | | | No | + +#### HitTestingDocument + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | | | No | +| doc_metadata | | | No | +| doc_type | | | No | +| id | | | No | +| name | | | No | + +#### HitTestingFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | | | No | +| id | | | No | +| mime_type | | | No | +| name | | | No | 
+| size | | | No | +| source_url | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HitTestingRecord + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| child_chunks | [ [HitTestingChildChunk](#hittestingchildchunk) ] | | No | +| files | [ [HitTestingFile](#hittestingfile) ] | | No | +| score | | | No | +| segment | | | No | +| summary | | | No | +| tsne_position | | | No | + +#### HitTestingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| query | string | | Yes | +| records | [ [HitTestingRecord](#hittestingrecord) ] | | No | + +#### HitTestingSegment + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| completed_at | | | No | +| content | | | No | +| created_at | | | No | +| created_by | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| document | | | No | +| document_id | | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | | | No | +| index_node_hash | | | No | +| index_node_id | | | No | +| indexing_at | | | No | +| keywords | [ string ] | | No | +| position | | | No | +| sign_content | | | No | +| status | | | No | +| stopped_at | | | No | +| tokens | | | No | +| word_count | | | No | + +#### HumanInputContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| form_definition | | | No | +| form_submission_data | | | No | +| submitted | boolean | | Yes | +| type | [ExecutionContentType](#executioncontenttype) | | No | +| workflow_run_id | string | | Yes | + +#### HumanInputDeliveryTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| delivery_method_id | string | Delivery method ID | Yes | 
+| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormDefinition + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| actions | [ [UserAction](#useraction) ] | | No | +| display_in_ui | boolean | | No | +| expiration_time | integer | | Yes | +| form_content | string | | Yes | +| form_id | string | | Yes | +| form_token | | | No | +| inputs | [ [FormInput](#forminput) ] | | No | +| node_id | string | | Yes | +| node_title | string | | Yes | +| resolved_default_values | object | | No | + +#### HumanInputFormPreviewPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormSubmissionData + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action_id | string | | Yes | +| action_text | string | | Yes | +| node_id | string | | Yes | +| node_title | string | | Yes | +| rendered_content | string | | Yes | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | Selected action ID | Yes | +| form_inputs | object | Values the user provides for the form's own fields | Yes | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | Yes | + +#### IconType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| IconType | string | | | + +#### Import + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| app_mode | | | No | +| current_dsl_version | string | | No | +| error | string | | No | +| id | string | | Yes | +| imported_dsl_version | string | | No | +| status | [ImportStatus](#importstatus) | | Yes | + +#### ImportStatus + +| Name | Type | Description | Required | +| 
---- | ---- | ----------- | -------- | +| ImportStatus | string | | | + +#### IncludeSecretQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | string | | No | + +#### IndexingEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dataset_id | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| indexing_technique | string | | Yes | +| info_list | object | | Yes | +| process_rule | object | | Yes | + +#### InfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | string | *Enum:* `"notion_import"`, `"upload_file"`, `"website_crawl"` | Yes | +| file_info_list | | | No | +| notion_info_list | | | No | +| website_info_list | | | No | + +#### Inner + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | | | No | +| model_type | [ModelType](#modeltype) | | Yes | +| provider | | | No | + +#### InsertExploreAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| can_trial | boolean | | No | +| category | string | | Yes | +| copyright | | | No | +| custom_disclaimer | | | No | +| desc | | | No | +| language | string | | Yes | +| position | integer | | Yes | +| privacy_policy | | | No | +| trial_limit | integer | | No | + +#### InsertExploreBannerPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| description | string | | Yes | +| img-src | string | | Yes | +| language | string | | No | +| link | string | | Yes | +| sort | integer | | Yes | +| title | string | | Yes | + +#### InstallPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| InstallPermission | string | | | + +#### InstalledAppCreatePayload + +| Name | Type | Description | Required | +| ---- | 
---- | ----------- | -------- | +| app_id | string | | Yes | + +#### InstalledAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | +| use_icon_as_answer_icon | | | No | + +#### InstalledAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| installed_apps | [ [InstalledAppResponse](#installedappresponse) ] | | Yes | + +#### InstalledAppResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app | [InstalledAppInfoResponse](#installedappinforesponse) | | Yes | +| app_owner_tenant_id | string | | Yes | +| editable | boolean | | Yes | +| id | string | | Yes | +| is_pinned | boolean | | Yes | +| last_used_at | | | No | +| uninstallable | boolean | | Yes | + +#### InstalledAppUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_pinned | | | No | + +#### InstalledAppsListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | App ID to filter by | No | + +#### InstructionGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current | string | Current instruction text | No | +| flow_id | string | Workflow/Flow ID | Yes | +| ideal_output | string | Expected ideal output | No | +| instruction | string | Instruction for generation | Yes | +| language | string | Programming language (javascript/python) | No | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| node_id | string | Node ID for workflow context | No | + +#### InstructionTemplatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | Instruction template type | Yes | + +#### IterationNodeRunPayload + +| Name 
| Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### JSONValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JSONValue | | | | + +#### KnowledgeConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| duplicate | boolean | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | string | *Enum:* `"economy"`, `"high_quality"` | Yes | +| is_multimodal | boolean | | No | +| name | | | No | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### LLMMode + +Enum class for large language model mode. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| LLMMode | string | Enum class for large language model mode. | | + +#### LangContentPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| body | string | | Yes | +| lang | string | Language tag: 'zh' \| 'en' \| 'jp' | Yes | +| subtitle | | | No | +| title | string | | Yes | +| title_pic_url | | | No | + +#### LegacyEndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | +| name | string | | Yes | +| settings | object | | Yes | + +#### LoadBalancingCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### LoadBalancingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| configs | | | No | +| enabled | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | 
+| email | string | | Yes | +| invite_token | | Invitation token | No | +| password | string | | Yes | +| remember_me | boolean | Remember me flag | No | + +#### LoopNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### MCPAuthPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authorization_code | | | No | +| provider_id | string | | Yes | + +#### MCPProviderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | Yes | + +#### MCPProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| provider_id | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPServerCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| parameters | object | Server parameters configuration | Yes | + +#### MCPServerUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| id | string | Server ID | Yes | +| parameters | object | Server parameters configuration | Yes | +| status | | Server status | No | + +#### Marketplace + +| Name | Type 
| Description | Required | +| ---- | ---- | ----------- | -------- | +| marketplace_plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### MemberInvitePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emails | [ string ] | | No | +| language | | | No | +| role | [TenantAccountRole](#tenantaccountrole) | | Yes | + +#### MemberRoleUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| role | string | | Yes | + +#### MessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | Yes | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | integer | | Yes | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| feedbacks | [ [Feedback](#feedback) ] | | Yes | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | [JSONValue](#jsonvalue) | | Yes | +| message_files | [ [MessageFile](#messagefile) ] | | Yes | +| message_metadata_dict | [JSONValue](#jsonvalue) | | Yes | +| message_tokens | integer | | Yes | +| parent_message_id | | | No | +| provider_response_latency | number | | Yes | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageDetailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | No | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | | | No | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| extra_contents | [ [HumanInputContent](#humaninputcontent) ] | | No | +| feedbacks | [ [Feedback](#feedback) ] | | No | +| from_account_id | | | 
No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | | | No | +| message_files | [ [MessageFile](#messagefile) ] | | No | +| message_metadata_dict | | | No | +| message_tokens | | | No | +| parent_message_id | | | No | +| provider_response_latency | | | No | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| message_id | string | Message ID | Yes | +| rating | | | No | + +#### MessageFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| belongs_to | | | No | +| filename | string | | Yes | +| id | string | | Yes | +| mime_type | | | No | +| size | | | No | +| transfer_method | string | | Yes | +| type | string | | Yes | +| upload_file_id | | | No | +| url | | | No | + +#### MessageInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [MessageDetailResponse](#messagedetailresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + 
+Metadata Filtering Condition. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### ModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| completion_params | object | | No | +| mode | [LLMMode](#llmmode) | | Yes | +| name | string | | Yes | +| provider | string | | Yes | + +#### ModelConfigPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| model_dict | | | No | +| pre_prompt | | | No | +| updated_at | | | No | +| updated_by | | | No | + +#### ModelConfigRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | | Agent mode configuration | No | +| configs | | Model configuration parameters | No | +| dataset_configs | | Dataset configurations | No | +| model | | Model name | No | +| more_like_this | | More like this configuration | No | +| opening_statement | | Opening statement | No | +| provider | | Model provider | No | +| retrieval_model | | Retrieval model configuration | No | +| speech_to_text | | Speech to text configuration | No | +| suggested_questions | | Suggested questions | No | +| text_to_speech | | Text to speech configuration | No | +| tools | | Available tools | No | + +#### ModelType + +Enum class for model type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ModelType | string | Enum class for model type. 
| | + +#### MoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | Yes | + +#### NodeIdQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### NodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### NodeRunRequiredPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | | Yes | + +#### NotionEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| notion_info_list | [ object ] | | Yes | +| process_rule | object | | Yes | + +#### NotionIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | | | No | +| type | string | | Yes | +| url | | | No | + +#### NotionInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| pages | [ [NotionPage](#notionpage) ] | | Yes | +| workspace_id | string | | Yes | + +#### NotionIntegrateInfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notion_info | [ [NotionIntegrateWorkspace](#notionintegrateworkspace) ] | | No | + +#### NotionIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_bound | boolean | | No | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### NotionIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [NotionIntegratePage](#notionintegratepage) ] | | No | +| workspace_icon 
| string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### NotionPage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | | | No | +| page_id | string | | Yes | +| page_name | string | | Yes | +| type | string | | Yes | + +#### OAuthDataSourceBindingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OAuthDataSourceResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | Authorization URL or 'internal' for internal setup | Yes | + +#### OAuthDataSourceSyncResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OwnerTransferCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### OwnerTransferEmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | | No | + +#### OwnerTransferPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | string | | Yes | + +#### Package + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### PaginatedConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### Parser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | 
Yes | + +#### ParserAsset + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_name | string | | Yes | +| plugin_unique_identifier | string | | Yes | + +#### ParserCreateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserCredentialCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialDelete + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialId + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | + +#### ParserCredentialSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### ParserDeleteCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDeleteModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDynamicOptions + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | 
| | No | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | +| provider_type | string | *Enum:* `"tool"`, `"trigger"` | Yes | + +#### ParserDynamicOptionsWithCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | + +#### ParserEnable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_trigger | boolean | | Yes | +| trigger_id | string | | Yes | + +#### ParserExcludePlugin + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_id | string | | Yes | + +#### ParserGetCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGetDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGithubInstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### 
ParserIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| filename | string | | Yes | +| tenant_id | string | | Yes | + +#### ParserLatest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_ids | [ string ] | | Yes | + +#### ParserList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserMarketplaceUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | + +#### ParserModelList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | | | No | + +#### ParserParameter + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | + +#### ParserPermissionChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | Yes | +| install_permission | [InstallPermission](#installpermission) | | Yes | + +#### ParserPluginIdentifierQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | + +#### ParserPluginIdentifiers + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifiers | [ string ] | | Yes | + +#### ParserPostDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_settings | [ [Inner](#inner) ] | | Yes | + +#### ParserPostModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| load_balancing | | | No | +| model | string | | Yes | +| model_type | 
[ModelType](#modeltype) | | Yes | + +#### ParserPreferencesChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_upgrade | [PluginAutoUpgradeSettingsPayload](#pluginautoupgradesettingspayload) | | Yes | +| permission | [PluginPermissionSettingsPayload](#pluginpermissionsettingspayload) | | Yes | + +#### ParserPreferredProviderType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| preferred_provider_type | string | *Enum:* `"custom"`, `"system"` | Yes | + +#### ParserReadme + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | | No | +| plugin_unique_identifier | string | | Yes | + +#### ParserSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserTasks + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserUninstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_installation_id | string | | Yes | + +#### ParserUpdateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### PartnerTenantsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| click_id | string | Click Id from partner referral link | Yes | + +#### 
Payload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon_info | | | No | +| name | string | | Yes | + +#### PipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### PlaceholderType + +Default value types for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| PlaceholderType | string | Default value types for form inputs. | | + +#### PluginAutoUpgradeSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| exclude_plugins | [ string ] | | No | +| include_plugins | [ string ] | | No | +| strategy_setting | [StrategySetting](#strategysetting) | | No | +| upgrade_mode | [UpgradeMode](#upgrademode) | | No | +| upgrade_time_of_day | integer | | No | + +#### PluginDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | | | No | +| type | [Type](#type) | | Yes | +| value | | | Yes | + +#### PluginEndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### PluginPermissionSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | No | +| install_permission | 
[InstallPermission](#installpermission) | | No | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### PublishWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### PublishedWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_preview | boolean | | No | +| original_document_id | | | No | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | No | +| start_node_id | string | | Yes | + +#### RagPipelineDatasetImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| yaml_content | string | | Yes | + +#### RagPipelineImport + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_dsl_version | string | | No | +| dataset_id | string | | No | +| error | string | | No | +| id | string | | No | +| imported_dsl_version | string | | No | +| pipeline_id | string | | No | +| status | string | | No | + +#### RagPipelineImportCheckDependencies + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [RagPipelineLeakedDependency](#ragpipelineleakeddependency) ] | | No | + +#### RagPipelineImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | | Yes | +| name | | | No | +| 
pipeline_id | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### RagPipelineLeakedDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | string | | No | +| type | string | | No | +| value | object | | No | + +#### RagPipelineRecommendedPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | | No | + +#### RecommendedAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | + +#### RecommendedAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| categories | [ string ] | | Yes | +| recommended_apps | [ [RecommendedAppResponse](#recommendedappresponse) ] | | Yes | + +#### RecommendedAppResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app | | | No | +| app_id | string | | Yes | +| can_trial | | | No | +| categories | [ string ] | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| description | | | No | +| is_listed | | | No | +| position | | | No | +| privacy_policy | | | No | + +#### RecommendedAppsQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | | No | + +#### RelatedAppList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AppDetailKernel](#appdetailkernel) ] | | No | +| total | integer | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### ResultResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | | Yes | + +#### 
RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | + +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### RuleCodeGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code_language | string | Programming language for code generation | No | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleStructuredOutputPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Structured output generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### SavedMessageListQuery + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | string | | No | +| hit_count_gte | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | [ string ] | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### SimpleMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | | Yes | +| inputs | object | | Yes | +| message | string | | Yes | +| query | string | | Yes | + +#### SimpleModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_dict | | | No | +| pre_prompt | | | No | + +#### Site + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| app_base_url | | | No | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| code | | | No | +| copyright | | | No | +| created_at | | | No | +| created_by | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | + +#### StatisticTimeRangeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### StatusCount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | Yes | +| partial_success | integer | | Yes | +| paused | integer | | Yes | +| success | integer | | Yes | + +#### StrategySetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| StrategySetting | string | | | + +#### SubscriptionQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interval | string | Billing interval
*Enum:* `"month"`, `"year"` | Yes | +| plan | string | Subscription plan
*Enum:* `"professional"`, `"team"` | Yes | + +#### SuggestedQuestionsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ string ] | Suggested question | Yes | + +#### SwitchWorkspacePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tenant_id | string | | Yes | + +#### SyncDraftWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | | No | +| environment_variables | [ object ] | | No | +| features | object | | Yes | +| graph | object | | Yes | +| hash | | | No | + +#### SyncDraftWorkflowResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| hash | string | | No | +| result | string | | No | +| updated_at | string | | No | + +#### SystemFeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | System feature configuration object | No | + +#### Tag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### TagBasePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Tag name | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to bind | Yes | +| target_id | string | Target ID to bind tags to | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingRemovePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to remove | Yes | +| target_id | string | Target ID to unbind tag from | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagListQueryParam + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| type | string | Tag type filter
*Enum:* `""`, `"app"`, `"knowledge"` | No | + +#### TagResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | | | No | + +#### TagType + +Tag type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TagType | string | Tag type | | + +#### TenantAccountRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TenantAccountRole | string | | | + +#### TenantInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| custom_config | | | No | +| id | string | | Yes | +| in_trial | | | No | +| name | | | No | +| next_credit_reset_date | | | No | +| plan | | | No | +| role | | | No | +| status | | | No | +| trial_credits | | | No | +| trial_credits_used | | | No | +| trial_end_reason | | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### TextToSpeechPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Whether to stream audio | No | +| text | string | Text to convert | Yes | +| voice | | Voice name | No | + +#### TextToSpeechRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | | No | +| streaming | | | No | +| text | | | No | +| voice | | | No | + +#### TextToSpeechVoiceQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | Language code | Yes | + +#### ToolOAuthCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### ToolParameterForm + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ToolParameterForm | string | | | + +#### TraceConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_config | object | Tracing configuration data | Yes | +| tracing_provider | string | Tracing provider name | Yes | + +#### TraceProviderQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_provider | string | Tracing provider name | Yes | + +#### TrialAppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | string | | No | +| api_base_url | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| deleted_tools | [ [TrialDeletedTool](#trialdeletedtool) ] | | No | +| description | string | | No | +| enable_api | boolean | | No | +| enable_site | boolean | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| max_active_requests | integer | | No | +| mode | string | | No | +| model_config | [TrialAppModelConfig](#trialappmodelconfig) | | No | +| name | string | | No | +| site | [TrialSite](#trialsite) | | No | +| tags | [ [TrialTag](#trialtag) ] | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | +| workflow | [TrialWorkflowPartial](#trialworkflowpartial) | | No | + +#### TrialAppModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | object | | No | +| annotation_reply | object | | No | +| chat_prompt_config | object | | No | +| completion_prompt_config | object | | No | +| created_at | object | | No | +| created_by | string | | No | +| dataset_configs | object 
| | No | +| dataset_query_variable | string | | No | +| external_data_tools | object | | No | +| file_upload | object | | No | +| model | object | | No | +| more_like_this | object | | No | +| opening_statement | string | | No | +| pre_prompt | string | | No | +| prompt_type | string | | No | +| retriever_resource | object | | No | +| sensitive_word_avoidance | object | | No | +| speech_to_text | object | | No | +| suggested_questions | object | | No | +| suggested_questions_after_answer | object | | No | +| text_to_speech | object | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| user_input_form | object | | No | + +#### TrialConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### TrialDeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | No | +| tool_name | string | | No | +| type | string | | No | + +#### TrialPipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### TrialSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_token | string | | No | +| app_base_url | string | | No | +| chat_color_theme | string | | No | +| chat_color_theme_inverted | boolean | | No | +| code | string | | No | +| 
copyright | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| custom_disclaimer | string | | No | +| customize_domain | string | | No | +| customize_token_strategy | string | | No | +| default_language | string | | No | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| privacy_policy | string | | No | +| prompt_public | boolean | | No | +| show_workflow_steps | boolean | | No | +| title | string | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | + +#### TrialTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### TrialWorkflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [TrialConversationVariable](#trialconversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [SimpleAccount](#simpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [TrialPipelineVariable](#trialpipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [SimpleAccount](#simpleaccount) | | No | +| version | string | | No | + +#### TrialWorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| id | string | | No | +| updated_at | object | | No | +| updated_by | string | | No | + +#### TriggerOAuthClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| 
client_params | | | No | +| enabled | | | No | + +#### TriggerSubscriptionBuilderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_type | string | | No | + +#### TriggerSubscriptionBuilderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | | | No | +| name | | | No | +| parameters | | | No | +| properties | | | No | + +#### TriggerSubscriptionBuilderVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### Type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| Type | string | | | + +#### UpdateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | | No | +| answer | | | No | +| content | | | No | +| question | | | No | + +#### UpdateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| max_active_requests | | Maximum active requests | No | +| name | string | App name | Yes | +| use_icon_as_answer_icon | | Use icon as answer icon | No | + +#### UpgradeMode + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| UpgradeMode | string | | | + +#### UploadConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_image_file_size_limit | | | No | +| audio_file_size_limit | integer | | Yes | +| batch_count_limit | integer | | Yes | +| file_size_limit | integer | | Yes | +| file_upload_limit | | | No | +| image_file_batch_limit | integer | | Yes | +| image_file_size_limit | integer | | Yes | +| single_chunk_attachment_limit | integer | | Yes | +| 
video_file_size_limit | integer | | Yes | +| workflow_file_upload_limit | integer | | Yes | + +#### UpsertNotificationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| contents | [ [LangContentPayload](#langcontentpayload) ] | | Yes | +| end_time | | RFC3339, e.g. 2026-03-20T23:59:59Z | No | +| frequency | string | 'once' \| 'every_page_load' | No | +| notification_id | | Omit to create; supply UUID to update | No | +| start_time | | RFC3339, e.g. 2026-03-01T00:00:00Z | No | +| status | string | 'active' \| 'inactive' | No | + +#### UserAction + +User action configuration. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| button_style | [ButtonStyle](#buttonstyle) | | No | +| id | string | | Yes | +| title | string | | Yes | + +#### WebhookTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| node_id | string | | Yes | +| webhook_debug_url | string | | Yes | +| webhook_id | string | | Yes | +| webhook_url | string | | Yes | + +#### WebsiteCrawlPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| options | object | | Yes | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | +| url | string | | Yes | + +#### WebsiteCrawlStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | + +#### WebsiteInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| job_id | string | | Yes | +| only_main_content | boolean | | No | +| provider | string | | Yes | +| urls | [ string ] | | Yes | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| 
Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### Workflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [ConversationVariable](#conversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [SimpleAccount](#simpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [PipelineVariable](#pipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [SimpleAccount](#simpleaccount) | | No | +| version | string | | No | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | +| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | | No | + +#### WorkflowAppLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | Filter logs created after this 
timestamp | No | +| created_at__before | | Filter logs created before this timestamp | No | +| created_by_account | | Filter by account | No | +| created_by_end_user_session_id | | Filter by end user session ID | No | +| detail | boolean | Whether to return detailed logs | No | +| keyword | | Search keyword for filtering logs | No | +| limit | integer | Number of items per page (1-100) | No | +| page | integer | Page number (1-99999) | No | +| status | | Execution status filter (succeeded, failed, stopped, partial-succeeded) | No | + +#### WorkflowArchivedLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowArchivedLogPartialResponse](#workflowarchivedlogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowArchivedLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| id | string | | Yes | +| trigger_metadata | | | No | +| workflow_run | | | No | + +#### WorkflowCommentBasic + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mention_count | integer | | No | +| participants | [ [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| reply_count | integer | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | 
No | + +#### WorkflowCommentCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | +| position_x | number | Comment X position | Yes | +| position_y | number | Comment Y position | Yes | + +#### WorkflowCommentDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mentions | [ [_AnonymousInlineModel_f7ff64cce858](#_anonymousinlinemodel_f7ff64cce858) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| replies | [ [_AnonymousInlineModel_55c39c6a4b9e](#_anonymousinlinemodel_55c39c6a4b9e) ] | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | No | + +#### WorkflowCommentMentionUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| users | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### WorkflowCommentReplyCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Reply content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | + +#### WorkflowCommentReplyUpdate + +| Name | 
Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentResolve + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | + +#### WorkflowCommentUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | | Mentioned user IDs. Omit to keep existing mentions. | No | +| position_x | | Comment X position | No | +| position_y | | Comment Y position | No | + +#### WorkflowDraftEnvVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftEnvVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftEnvVariable](#workflowdraftenvvariable) ] | | No | + +#### WorkflowDraftVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| full_content | object | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value | object | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- 
| +| items | [ [WorkflowDraftVariable](#workflowdraftvariable) ] | | No | + +#### WorkflowDraftVariableListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Items per page | No | +| page | integer | Page number | No | + +#### WorkflowDraftVariableListWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftVariableWithoutValue](#workflowdraftvariablewithoutvalue) ] | | No | +| total | object | | No | + +#### WorkflowDraftVariablePatchPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | | No | +| value | | | No | + +#### WorkflowDraftVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | Variable name | No | +| value | | Variable value | No | + +#### WorkflowDraftVariableWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowExecutionStatus + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| WorkflowExecutionStatus | string | | | + +#### WorkflowFeaturesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Workflow feature configuration | Yes | + +#### WorkflowListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| named_only | boolean | | No | +| page | integer | | No | +| user_id | | | No | + +#### WorkflowOnlineUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_ids | [ 
string ] | App IDs | No | + +#### WorkflowPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_more | boolean | | No | +| items | [ [Workflow](#workflow) ] | | No | +| limit | integer | | No | +| page | integer | | No | + +#### WorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| id | string | | Yes | +| updated_at | | | No | +| updated_by | | | No | + +#### WorkflowRunCount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | No | +| partial_succeeded | integer | | No | +| running | integer | | No | +| stopped | integer | | No | +| succeeded | integer | | No | +| total | integer | | No | + +#### WorkflowRunCountQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | | Workflow run status filter | No | +| time_range | | Time range filter (e.g., 7d, 4h, 30m, 30s) | No | +| triggered_from | | Filter by trigger source: debugging or app-run | No | + +#### WorkflowRunDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| created_by_end_user | [SimpleEndUser](#simpleenduser) | | No | +| created_by_role | string | | No | +| elapsed_time | number | | No | +| error | string | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| graph | object | | No | +| id | string | | No | +| inputs | object | | No | +| outputs | object | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### WorkflowRunExport + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| presigned_url | string | Pre-signed URL for download | No | +| presigned_url_expires_at | 
string | Pre-signed URL expiration time | No | +| status | string | Export status: success/failed | No | + +#### WorkflowRunForArchivedLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| elapsed_time | | | No | +| id | string | | Yes | +| status | | | No | +| total_tokens | | | No | +| triggered_from | | | No | + +#### WorkflowRunForList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| elapsed_time | number | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| id | string | | No | +| retry_index | integer | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last run ID for pagination | No | +| limit | integer | Number of items per page (1-100) | No | +| status | | Workflow run status filter | No | +| triggered_from | | Filter by trigger source: debugging or app-run | No | + +#### WorkflowRunNodeExecution + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| created_by_end_user | [SimpleEndUser](#simpleenduser) | | No | +| created_by_role | string | | No | +| elapsed_time | number | | No | +| error | string | | No | +| 
execution_metadata | object | | No | +| extras | object | | No | +| finished_at | object | | No | +| id | string | | No | +| index | integer | | No | +| inputs | object | | No | +| inputs_truncated | boolean | | No | +| node_id | string | | No | +| node_type | string | | No | +| outputs | object | | No | +| outputs_truncated | boolean | | No | +| predecessor_node_id | string | | No | +| process_data | object | | No | +| process_data_truncated | boolean | | No | +| status | string | | No | +| title | string | | No | + +#### WorkflowRunNodeExecutionList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunNodeExecution](#workflowrunnodeexecution) ] | | No | + +#### WorkflowRunPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunForList](#workflowrunforlist) ] | | No | +| has_more | boolean | | No | +| limit | integer | | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### WorkflowRunQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### WorkflowRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### WorkflowStatisticQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date and time (YYYY-MM-DD HH:MM) | No | +| start | | Start date and time (YYYY-MM-DD HH:MM) | No | + +#### WorkflowToolCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ 
[WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_app_id | string | | Yes | + +#### WorkflowToolDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| workflow_tool_id | string | | Yes | + +#### WorkflowToolParameterConfiguration + +Workflow tool configuration + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | The description of the parameter | Yes | +| form | [ToolParameterForm](#toolparameterform) | The form of the parameter | Yes | +| name | string | The name of the parameter | Yes | + +#### WorkflowToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ [WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_tool_id | string | | Yes | + +#### WorkflowTriggerListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowTriggerResponse](#workflowtriggerresponse) ] | | Yes | + +#### WorkflowTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| icon | string | | Yes | +| id | string | | Yes | +| node_id | string | | Yes | +| provider_name | string | | Yes | +| status | string | | Yes | +| title | string | | Yes | +| trigger_type | string | | Yes | +| updated_at | | | No | + +#### WorkflowUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### WorkspaceCustomConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| remove_webapp_brand | 
| | No | +| replace_webapp_logo | | | No | + +#### WorkspaceInfoPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### WorkspaceListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| page | integer | | No | + +#### _AnonymousInlineModel_55c39c6a4b9e + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | + +#### _AnonymousInlineModel_6fec07cd0d85 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar_url | object | | No | +| email | string | | No | +| id | string | | No | +| name | string | | No | + +#### _AnonymousInlineModel_b1954337d565 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable | boolean | | No | +| model_name | string | | No | +| model_provider_name | string | | No | +| summary_prompt | string | | No | + +#### _AnonymousInlineModel_f7ff64cce858 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mentioned_user_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| mentioned_user_id | string | | No | +| reply_id | string | | No | + +## FastOpenAPI Preview (OpenAPI 3.0) + +### Dify API (FastOpenAPI PoC) +FastOpenAPI proof of concept for Dify API + +#### Version: 1.0 + +--- + +##### [GET] /console/api/init +**Get initialization validation status.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [InitStatusResponse](#initstatusresponse)
| + +##### [POST] /console/api/init +**Validate initialization password.** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [InitValidatePayload](#initvalidatepayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [InitValidateResponse](#initvalidateresponse)
| + +##### [GET] /console/api/ping +**Health check endpoint for connection testing.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [PingResponse](#pingresponse)
| + +##### [GET] /console/api/setup +**Get system setup status. + + NOTE: This endpoint is unauthenticated by design. + + During first-time bootstrap there is no admin account yet, so frontend initialization must be + able to query setup progress before any login flow exists. + + Only bootstrap-safe status information should be returned by this endpoint. + ** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [SetupStatusResponse](#setupstatusresponse)
| + +##### [POST] /console/api/setup +**Initialize system setup with admin account. + + NOTE: This endpoint is unauthenticated by design for first-time bootstrap. + Access is restricted by deployment mode (`SELF_HOSTED`), one-time setup guards, + and init-password validation rather than user session authentication. + ** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [SetupRequestPayload](#setuprequestpayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [SetupResponse](#setupresponse)
| + +##### [GET] /console/api/version +**Check for application version updates.** + +###### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| current_version | query | | Yes | string | + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [VersionResponse](#versionresponse)
| + +--- +##### Schemas + +###### ErrorSchema + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| error | { **"details"**: string, **"message"**: string, **"status"**: integer, **"type"**: string } | | Yes | + +###### InitStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | string,
**Available values:** "finished", "not_started" | Initialization status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### InitValidatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| password | string | Initialization password | Yes | + +###### InitValidateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +###### PingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Health check result | Yes | + +###### SetupRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Admin email address | Yes | +| language | | Admin language | No | +| name | string | Admin name (max 30 characters) | Yes | +| password | string | Admin password | Yes | + +###### SetupResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Setup result | Yes | + +###### SetupStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| setup_at | | Setup completion time (ISO format) | No | +| step | string,
**Available values:** "finished", "not_started" | Setup step status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### VersionFeatures + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_replace_logo | boolean | Whether logo replacement is supported | Yes | +| model_load_balancing_enabled | boolean | Whether model load balancing is enabled | Yes | + +###### VersionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_auto_update | boolean | Whether auto-update is supported | Yes | +| features | [VersionFeatures](#versionfeatures) | Feature flags and capabilities | Yes | +| release_date | string | Release date of latest version | Yes | +| release_notes | string | Release notes for latest version | Yes | +| version | string | Latest version number | Yes | diff --git a/api/openapi/markdown/service-swagger.md b/api/openapi/markdown/service-swagger.md new file mode 100644 index 0000000000..ec5ed280f5 --- /dev/null +++ b/api/openapi/markdown/service-swagger.md @@ -0,0 +1,2754 @@ +# Service API +API for application services + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## service_api +Service operations + +### / + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/feedbacks + +#### GET +##### Summary + +Get all feedbacks for the application + +##### Description + +Get all feedbacks for the application +Returns paginated list of all feedback submitted for messages in this app. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackListQuery](#feedbacklistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedbacks retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action} + +#### POST +##### Summary + +Enable or disable annotation reply feature + +##### Description + +Enable or disable annotation reply feature + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyActionPayload](#annotationreplyactionpayload) | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action}/status/{job_id} + +#### GET +##### Summary + +Get the status of an annotation reply action job + +##### Description + +Get the status of an annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Job not found | + +### /apps/annotations + +#### GET +##### Summary + +List annotations for the application + +##### Description + +List annotations for the application + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations retrieved successfully | [AnnotationList](#annotationlist) | +| 401 | Unauthorized - invalid API token | | + +#### POST 
+##### Summary + +Create a new annotation + +##### Description + +Create a new annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | + +### /apps/annotations/{annotation_id} + +#### DELETE +##### Summary + +Delete an annotation + +##### Description + +Delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Annotation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Annotation not found | + +#### PUT +##### Summary + +Update an existing annotation + +##### Description + +Update an existing annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | +| 403 | Forbidden - insufficient permissions | | +| 404 | Annotation not found | | + +### /audio-to-text + +#### POST +##### Summary + +Convert audio to text using speech-to-text + +##### Description + +Convert audio to text using speech-to-text +Accepts an audio file upload and returns the transcribed text. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Audio successfully transcribed | +| 400 | Bad request - no audio or invalid audio | +| 401 | Unauthorized - invalid API token | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal server error | + +### /chat-messages + +#### POST +##### Summary + +Send a message in a chat conversation + +##### Description + +Send a message in a chat conversation +This endpoint handles chat messages for chat, agent chat, and advanced chat applications. +Supports conversation management and both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatRequestPayload](#chatrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message sent successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running chat message generation + +##### Description + +Stop a running chat message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /completion-messages + +#### POST +##### Summary + +Create a completion for the given prompt + +##### Description + +Create a completion for the given prompt +This endpoint generates a completion based on the provided inputs and query. 
+Supports both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionRequestPayload](#completionrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | +| 500 | Internal server error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running completion task + +##### Description + +Stop a running completion task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /conversations + +#### GET +##### Summary + +List all conversations for the current user + +##### Description + +List all conversations for the current user +Supports pagination using last_id and limit parameters. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversations retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Last conversation not found | + +### /conversations/{c_id} + +#### DELETE +##### Summary + +Delete a specific conversation + +##### Description + +Delete a specific conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/name + +#### POST +##### Summary + +Rename a conversation or auto-generate a name + +##### Description + +Rename a conversation or auto-generate a name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/variables + +#### GET +##### Summary + +List all variables for a conversation + +##### Description + +List all variables for a conversation +Conversational variables are only available for chat applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variables retrieved successfully | [ConversationVariableInfiniteScrollPaginationResponse](#conversationvariableinfinitescrollpaginationresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation not found | | + +### /conversations/{c_id}/variables/{variable_id} + +#### PUT +##### Summary + +Update a conversation variable's value + +##### Description + +Update a conversation variable's value +Allows updating the value of a specific conversation variable. +The value must match the variable's expected type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| c_id | path | Conversation ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | [ConversationVariableResponse](#conversationvariableresponse) | +| 400 | Bad request - type mismatch | | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation or variable not found | | + +### /datasets + +#### GET +##### Summary + +Resource for getting datasets + +##### Description + +List all datasets + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | +| 401 | Unauthorized - invalid API token | + +#### POST +##### Summary + +Resource for creating datasets + +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/pipeline/file-upload + +#### POST +##### Summary + +Upload a file to a knowledge base pipeline + +##### Description + +Upload a file to a knowledge base pipeline +Accepts a single file upload via multipart/form-data. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | File uploaded successfully | +| 400 | Bad request - no file or invalid file | +| 401 | Unauthorized - invalid API token | +| 413 | File too large | +| 415 | Unsupported file type | + +### /datasets/tags + +#### DELETE +##### Summary + +Delete a knowledge type tag + +##### Description + +Delete a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagDeletePayload](#tagdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tag deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +#### GET +##### Summary + +Get all knowledge type tags + +##### Description + +Get all knowledge type tags + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tags retrieved successfully | +| 401 | Unauthorized - invalid API token | + +#### PATCH +##### Description + +Update a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagUpdatePayload](#tagupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tag 
updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +#### POST +##### Summary + +Add a knowledge type tag + +##### Description + +Add a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagCreatePayload](#tagcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tag created successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/binding + +#### POST +##### Description + +Bind tags to a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags bound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/unbinding + +#### POST +##### Description + +Unbind tags from a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagUnbindingPayload](#tagunbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags unbound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/{dataset_id} + +#### DELETE +##### Summary + +Deletes a dataset given its ID + +##### Description + +Delete a dataset +Args: + _: ignore + dataset_id (UUID): The ID of the dataset to be deleted. + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + if the dataset was successfully deleted. Omitted in HTTP response. 
+ int: HTTP status code 204 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Dataset deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | +| 409 | Conflict - dataset is in use | + +#### GET +##### Description + +Get a specific dataset by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +#### PATCH +##### Description + +Update an existing dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/document/create-by-file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document 
created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create-by-text + +#### POST +##### Description + +Create a new document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a new document by providing text content. Use /datasets/{dataset_id}/document/create-by-text instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/documents + +#### GET +##### Description + +List all documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Description + +Download selected uploaded documents as a single ZIP archive + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | ZIP archive generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Summary + +Update metadata for multiple documents + +##### Description + +Update metadata for multiple documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[MetadataOperationData](#metadataoperationdata) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/status/{action} + +#### PATCH +##### Summary + +Batch update document status + +##### Description + +Batch update document status +Args: + tenant_id: tenant id + dataset_id: dataset id + action: action to perform (Literal["enable", "disable", "archive", "un_archive"]) + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + int: HTTP status code 200 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + Forbidden: If the user does not have permission. + InvalidActionError: If the action is invalid or cannot be performed. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable', 'disable', 'archive', or 'un_archive' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document status updated successfully | +| 400 | Bad request - invalid action | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/{batch}/indexing-status + +#### GET +##### Description + +Get indexing status for documents in a batch + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | Batch ID | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved 
successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or documents not found | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Summary + +Delete document + +##### Description + +Delete a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Document deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - document is archived | +| 404 | Document not found | + +#### GET +##### Description + +Get a specific document by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document not found | + +#### PATCH +##### Description + +Update an existing document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Download URL generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or upload file not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### GET +##### Description + +List segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentListQuery](#segmentlistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +#### POST +##### Description + +Create segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments created successfully | +| 400 | Bad request - segments data is missing | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Description + +Delete a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | 
+| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Segment deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### GET +##### Description + +Get a specific segment by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Update a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to update | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Description + +List child chunks for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkListQuery](#childchunklistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | 
Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunks retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Create a new child chunk for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Description + +Delete a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | Child chunk ID to delete | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Child chunk deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +#### PATCH +##### Description + +Update a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | +| child_chunk_id | path | Child chunk ID to update | Yes | string 
| +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-text + +#### POST +##### Description + +Update an existing document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by providing text content. Use /datasets/{dataset_id}/documents/{document_id}/update-by-text instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Summary + +Get all metadata for a dataset + +##### Description + +Get all metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +#### POST +##### Summary + +Create metadata for a dataset + +##### Description + +Create metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Metadata created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/built-in + +#### GET +##### Summary + +Get all built-in metadata fields + +##### Description + +Get all built-in metadata fields + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Built-in fields retrieved successfully | +| 401 | Unauthorized - invalid 
API token | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Summary + +Enable or disable built-in metadata field + +##### Description + +Enable or disable built-in metadata field + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Summary + +Delete metadata + +##### Description + +Delete metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Metadata deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +#### PATCH +##### Summary + +Update metadata name + +##### Description + +Update metadata name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +### /datasets/{dataset_id}/pipeline/datasource-plugins + +#### GET +##### Summary + +Resource for getting datasource plugins + +##### Description + +List 
all datasource plugins for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| is_published | query | Whether to get published or draft datasource plugins (true for published, false for draft, default: true) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource plugins retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Resource for running a datasource node + +##### Description + +Run a datasource node for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource node run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/run + +#### POST +##### Summary + +Resource for running a rag pipeline + +##### Description + +Run a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Pipeline run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/retrieve + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/tags + +#### GET +##### Summary + +Get all knowledge type tags + +##### Description + +Get tags bound to a specific dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tags retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /end-users/{end_user_id} + +#### GET +##### Summary + +Get end user detail + +##### Description + +Get an end user by ID +This endpoint is scoped to the current app token's tenant/app to prevent +cross-tenant/app access when an end-user ID is known. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| end_user_id | path | End user ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | End user retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | End user not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in conversations + +##### Description + +Upload a file for use in conversations +Accepts a single file upload via multipart/form-data. 
+ +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - no file or invalid file | | +| 401 | Unauthorized - invalid API token | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /files/{file_id}/preview + +#### GET +##### Summary + +Preview/Download a file that was uploaded via Service API + +##### Description + +Preview or download a file uploaded via Service API +Provides secure file preview/download functionality. +Files can only be accessed if they belong to messages within the requesting app's context. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FilePreviewQuery](#filepreviewquery) | +| file_id | path | UUID of the file to preview | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | File retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - file access denied | +| 404 | File not found | + +### /form/human_input/{form_token} + +#### GET +##### Description + +Get a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +#### POST +##### Description + +Submit a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| form_token | 
path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form submitted successfully | +| 400 | Bad request - invalid submission data | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +### /info + +#### GET +##### Summary + +Get app information + +##### Description + +Get basic application information +Returns basic information about the application including name, description, tags, and mode. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application info retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /messages + +#### GET +##### Summary + +List messages in a conversation + +##### Description + +List messages in a conversation +Retrieves messages with pagination support using first_id. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Messages retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or first message not found | + +### /messages/{message_id}/feedbacks + +#### POST +##### Summary + +Submit feedback for a message + +##### Description + +Submit feedback for a message +Allows users to rate messages as like/dislike and provide optional feedback content. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | + +### /messages/{message_id}/suggested + +#### GET +##### Summary + +Get suggested follow-up questions for a message + +##### Description + +Get suggested follow-up questions for a message +Returns AI-generated follow-up questions based on the message content. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Suggested questions retrieved successfully | +| 400 | Suggested questions feature is disabled | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | +| 500 | Internal server error | + +### /meta + +#### GET +##### Summary + +Get app metadata + +##### Description + +Get application metadata +Returns metadata about the application including configuration and settings. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve application input parameters and configuration +Returns the input form parameters and configuration for the application. 
 + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Parameters retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Get application site configuration +Returns the site configuration for the application including theme, icons, and text. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Site configuration retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - site not found or tenant archived | + +### /text-to-audio + +#### POST +##### Summary + +Convert text to audio using text-to-speech + +##### Description + +Convert text to audio using text-to-speech +Converts the provided text to audio using the specified voice. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text successfully converted to audio | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 500 | Internal server error | + +### /workflow/{task_id}/events + +#### GET +##### Description + +Get workflow execution events stream after resume + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Workflow run ID | Yes | string | +| continue_on_pause | query | Whether to keep the stream open across workflow_paused events, specify `"true"` to keep the stream open for `workflow_paused` events. 
| No | string | +| include_state_snapshot | query | Whether to replay from persisted state snapshot, specify `"true"` to include a status snapshot of executed nodes | No | string | +| user | query | End user identifier (query param) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | SSE event stream | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow run not found | + +### /workflows/logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow execution logs +Returns paginated workflow execution logs with filtering options. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowLogQuery](#workflowlogquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | +| 401 | Unauthorized - invalid API token | | + +### /workflows/run + +#### POST +##### Summary + +Execute a workflow + +##### Description + +Execute a workflow +Runs a workflow with the provided inputs and returns the results. +Supports both blocking and streaming response modes. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workflows/run/{workflow_run_id} + +#### GET +##### Summary + +Get a workflow task running detail + +##### Description + +Get workflow run details +Returns detailed information about a specific workflow run. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run details retrieved successfully | [WorkflowRunResponse](#workflowrunresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Workflow run not found | | + +### /workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop a running workflow task + +##### Description + +Stop a running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /workflows/{workflow_id}/run + +#### POST +##### Summary + +Run specific workflow by ID + +##### Description + +Execute a specific workflow by ID +Executes a specific workflow version identified by its ID. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | +| workflow_id | path | Workflow ID to execute | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Summary + +Get available models by model type + +##### Description + +Get available models by model type +Returns a list of available models for the specified model type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | Type of model to retrieve | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Models retrieved successfully | +| 401 | Unauthorized - invalid API token | + +--- +### Models + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | Annotation answer | Yes | +| question | string | Annotation question | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationReplyActionPayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### ChatRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate_name | boolean | Auto generate conversation name | No | +| conversation_id | | Conversation UUID | No | +| files | | | No | +| inputs | object | | Yes | +| query | string | | Yes | +| response_mode | | | No | +| retriever_from | string | | No | +| workflow_id | | Workflow ID for advanced chat | No | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CompletionRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last 
conversation ID for pagination | No | +| limit | integer | Number of conversations to return | No | +| sort_by | string | Sort order for conversations
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariableInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| value | | | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last variable ID for pagination | No | +| limit | integer | Number of variables to return | No | +| variable_name | | Filter variables by name | No | + +#### DataSetTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | +| retrieval_model | | | No | +| summary_index_setting 
| | | No | + +#### DatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| indexing_technique | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| status | | Document status filter | No | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentTextCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | string | | Yes | + +#### DocumentTextUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| name | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | | | No | + +#### FeedbackListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Number of feedbacks per page | No | +| page | integer | Page number | No | + +#### FilePreviewQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| as_attachment | boolean | Download as attachment | No | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | 
string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| inputs | object | | Yes | + +#### JsonValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JsonValue | | | | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + +Metadata Filtering Condition. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### PipelineRunApiEntity + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | +| response_mode | string | | Yes | +| start_node_id | string | | Yes | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | 
+ +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segments | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| status | [ string ] | | No | + +#### SegmentUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | | | No | +| enabled | | | No | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segment | [SegmentUpdateArgs](#segmentupdateargs) | | Yes | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | | Yes | +| target_id | string | | Yes | + +#### TagCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### TagDeletePayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| tag_id | string | | Yes | + +#### TagUnbindingPayload + +Accept the legacy single-tag Service API payload while exposing a normalized tag_ids list internally. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_id | | | No | +| tag_ids | [ string ] | | No | +| target_id | string | | Yes | + +#### TagUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| tag_id | string | | Yes | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | 
+| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | | No | + +#### WorkflowLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | | No | +| created_at__before | | | No | +| created_by_account | | | No | +| created_by_end_user_session_id | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| response_mode | | | No | + +#### WorkflowRunResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| finished_at | | | No | +| id | string | | Yes | +| inputs | | | No | +| outputs | object | | No | +| status | string | | Yes | +| total_steps | | | No | +| total_tokens | | | No | +| workflow_id | string | | Yes | diff --git a/api/openapi/markdown/web-swagger.md b/api/openapi/markdown/web-swagger.md new file mode 100644 index 0000000000..c9b3b31357 --- /dev/null +++ b/api/openapi/markdown/web-swagger.md @@ -0,0 +1,1224 @@ +# Web API +Public APIs for web applications including file uploads, chat interactions, and app management + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## web +Web application API operations + 
+### /audio-to-text + +#### POST +##### Summary + +Convert audio to text + +##### Description + +Convert audio file to text using speech-to-text service. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal Server Error | + +### /chat-messages + +#### POST +##### Description + +Create a chat message for conversational applications. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /completion-messages + +#### POST +##### Description + +Create a completion message for text generation applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /conversations + +#### GET +##### Description + +Retrieve paginated list of conversations for a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last conversation ID for pagination | No | string | +| limit | query | Number of conversations to return (1-100) | No | integer | +| pinned | query | Filter by pinned status | No | string | +| sort_by | query | Sort order | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id} + +#### DELETE +##### Description + +Delete a specific conversation. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/name + +#### POST +##### Description + +Rename a specific conversation with a custom name or auto-generate one. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | +| auto_generate | query | Auto-generate conversation name | No | boolean | +| name | query | New conversation name | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/pin + +#### PATCH +##### Description + +Pin a specific conversation to keep it at the top of the list. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation pinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/unpin + +#### PATCH +##### Description + +Unpin a specific conversation to remove it from the top of the list. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation unpinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /email-code-login + +#### POST +##### Description + +Send email verification code for login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginSendPayload](#emailcodeloginsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | + +### /email-code-login/validity + +#### POST +##### Description + +Verify email code and complete login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginVerifyPayload](#emailcodeloginverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code verified and login successful | +| 400 | Bad request - invalid code or token | +| 401 | Invalid token or expired code | +| 404 | Account not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in web applications + +##### Description + +Upload a file for use in web applications +Accepts file uploads for use within web applications, supporting +multiple file types with automatic validation and storage. 
+ +Args: + app_model: The associated application model + end_user: The end user uploading the file + +Form Parameters: + file: The file to upload (required) + source: Optional source type (datasets or None) + +Returns: + dict: File information including ID, URL, and metadata + int: HTTP status code 201 for success + +Raises: + NoFileUploadedError: No file provided in request + TooManyFilesError: Multiple files provided (only one allowed) + FilenameNotExistsError: File has no filename + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - invalid file or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset email sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | +| 429 | Too many requests - rate limit exceeded | + +### /forgot-password/resets + +#### POST +##### Description + +Reset user password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset successfully | +| 400 | Bad request - invalid parameters or password mismatch | +| 401 | Invalid or expired token | +| 404 | Account not found | + +### 
/forgot-password/validity + +#### POST +##### Description + +Verify password reset token validity + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Token is valid | +| 400 | Bad request - invalid token format | +| 401 | Invalid or expired token | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by token + +##### Description + +GET /api/form/human_input/{form_token} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by token + +##### Description + +POST /api/form/human_input/{form_token} + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Description + +Authenticate user for web application access + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Authentication successful | +| 400 | Bad request - invalid email or password format | +| 401 | Authentication failed - email or password mismatch | +| 403 | Account banned or login 
disabled | +| 404 | Account not found | + +### /login/status + +#### GET +##### Description + +Check login status + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Login status | +| 401 | Login status | + +### /logout + +#### POST +##### Description + +Logout user from web application + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Logout successful | + +### /messages + +#### GET +##### Description + +Retrieve paginated list of messages from a conversation in a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| conversation_id | query | Conversation UUID | Yes | string | +| first_id | query | First message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /messages/{message_id}/feedbacks + +#### POST +##### Description + +Submit feedback (like/dislike) for a specific message. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | +| content | query | Feedback content | No | string | +| rating | query | Feedback rating | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/more-like-this + +#### GET +##### Description + +Generate a new completion similar to an existing message (completion apps only). 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageMoreLikeThisQuery](#messagemorelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested follow-up questions after a message (chat apps only). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a chat app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found or Conversation Not Found | +| 500 | Internal Server Error | + +### /meta + +#### GET +##### Summary + +Get app meta + +##### Description + +Retrieve the metadata for a specific app. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve the parameters for a specific app. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /passport + +#### GET +##### Description + +Get authentication passport for web application access + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Passport retrieved successfully | +| 401 | Unauthorized - missing app code or invalid authentication | +| 404 | Application or user not found | + +### /remote-files/upload + +#### POST +##### Summary + +Upload a file from a remote URL + +##### Description + +Upload a file from a remote URL +Downloads a file from the provided remote URL and uploads it +to the platform storage for use in web applications. + +Args: + app_model: The associated application model + end_user: The end user making the request + +JSON Parameters: + url: The remote URL to download the file from (required) + +Returns: + dict: File information including ID, signed URL, and metadata + int: HTTP status code 201 for success + +Raises: + RemoteFileUploadError: Failed to fetch file from remote URL + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Remote file uploaded successfully | [FileWithSignedUrl](#filewithsignedurl) | +| 400 | Bad request - invalid URL or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | +| 500 | Failed to fetch remote file | | + +### /remote-files/{url} + +#### GET +##### Summary + +Get information about a remote file + +##### Description + +Get information about a remote file +Retrieves basic information about a file located at a remote URL, +including content type and content length. 
+ +Args: + app_model: The associated application model + end_user: The end user making the request + url: URL-encoded path to the remote file + +Returns: + dict: Remote file information including type and length + +Raises: + HTTPException: If the remote file cannot be accessed + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Remote file information retrieved successfully | [RemoteFileInfo](#remotefileinfo) | +| 400 | Bad request - invalid URL | | +| 404 | Remote file not found | | +| 500 | Failed to fetch remote file | | + +### /saved-messages + +#### GET +##### Description + +Retrieve paginated list of saved messages for a completion application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +#### POST +##### Description + +Save a specific message for later reference. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | query | Message UUID to save | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message saved successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /saved-messages/{message_id} + +#### DELETE +##### Description + +Remove a message from saved messages. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Message removed successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Retrieve app site information and configuration. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /system-features + +#### GET +##### Summary + +Get system feature flags and configuration + +##### Description + +Get system feature flags and configuration +Returns the current system feature flags and configuration +that control various functionalities across the platform. + +Returns: + dict: System feature configuration object + +This endpoint is akin to the `SystemFeatureApi` endpoint in api/controllers/console/feature.py, +except it is intended for use by the web app, instead of the console dashboard. 
+ +NOTE: This endpoint is unauthenticated by design, as it provides system features +data required for webapp initialization. + +Authentication would create circular dependency (can't authenticate without webapp loading). + +Only non-sensitive configuration data should be returned by this endpoint. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | System features retrieved successfully | +| 500 | Internal server error | + +### /text-to-audio + +#### POST +##### Summary + +Convert text to audio + +##### Description + +Convert text to audio using text-to-speech service. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 500 | Internal Server Error | + +### /webapp/access-mode + +#### GET +##### Description + +Retrieve the access mode for a web application (public or restricted). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| appCode | query | Application code | No | string | +| appId | query | Application ID | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 500 | Internal Server Error | + +### /webapp/permission + +#### GET +##### Description + +Check if user has permission to access a web application. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| appId | query | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 500 | Internal Server Error | + +### /workflows/run + +#### POST +##### Summary + +Run workflow + +##### Description + +Execute a workflow with provided inputs and files. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Description + +Stop a running workflow task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +--- +## default +Default namespace + +### /workflow/{task_id}/events + +#### GET +##### Summary + +Get workflow execution events stream after resume + +##### Description + +GET /api/workflow/{task_id}/events + +Returns Server-Sent Events stream. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### AppAccessModeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| appCode | | Application code | No | +| appId | | Application ID | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Files to be processed | No | +| inputs | object | Input variables for the chat | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query/message | Yes | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Files to be processed | No | +| inputs | object | Input variables for the completion | Yes | +| query | string | Query text for completion | No | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | +| sort_by | string | *Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### EmailCodeLoginSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### 
EmailCodeLoginVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### FileWithSignedUrl + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| size | integer | | Yes | +| url | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| password | string | | Yes | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- 
| +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MessageMoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | Yes | + +#### RemoteFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_length | integer | | Yes | +| file_type | string | | Yes | + +#### RemoteFileUploadPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| url | string (uri) | Remote file URL | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### SavedMessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py index 1b97746dea..0900dfda97 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py @@ -3,6 +3,7 @@ from collections.abc import Mapping from typing import Any, cast from unittest.mock import MagicMock +import pytest from dify_trace_aliyun.entities.semconv import ( GEN_AI_FRAMEWORK, GEN_AI_SESSION_ID, @@ -31,7 +32,7 @@ from graphon.enums import WorkflowNodeExecutionStatus from models import EndUser -def test_get_user_id_from_message_data_no_end_user(monkeypatch): +def 
test_get_user_id_from_message_data_no_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = None @@ -39,7 +40,7 @@ def test_get_user_id_from_message_data_no_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "account_id" -def test_get_user_id_from_message_data_with_end_user(monkeypatch): +def test_get_user_id_from_message_data_with_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -57,7 +58,7 @@ def test_get_user_id_from_message_data_with_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "session_id" -def test_get_user_id_from_message_data_end_user_not_found(monkeypatch): +def test_get_user_id_from_message_data_end_user_not_found(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -111,7 +112,7 @@ def test_get_workflow_node_status(): assert status.status_code == StatusCode.UNSET -def test_create_links_from_trace_id(monkeypatch): +def test_create_links_from_trace_id(monkeypatch: pytest.MonkeyPatch): # Mock create_link mock_link = MagicMock(spec=Link) import dify_trace_aliyun.data_exporter.traceclient diff --git a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py index 952f10c34f..95e27c791f 100644 --- a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py +++ b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py @@ -40,7 +40,7 @@ def langfuse_config(): @pytest.fixture -def trace_instance(langfuse_config, monkeypatch): +def trace_instance(langfuse_config, monkeypatch: pytest.MonkeyPatch): # Mock Langfuse client 
to avoid network calls mock_client = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", lambda **kwargs: mock_client) @@ -49,7 +49,7 @@ def trace_instance(langfuse_config, monkeypatch): return instance -def test_init(langfuse_config, monkeypatch): +def test_init(langfuse_config, monkeypatch: pytest.MonkeyPatch): mock_langfuse = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", mock_langfuse) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -64,7 +64,7 @@ def test_init(langfuse_config, monkeypatch): assert instance.file_base_url == "http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -114,7 +114,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info trace_info = WorkflowTraceInfo( workflow_id="wf-1", @@ -218,7 +218,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert other_span.level == LevelEnum.ERROR -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -259,7 +259,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): assert trace_data.name == TraceTaskName.WORKFLOW_TRACE -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -287,7 +287,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): 
trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -331,7 +331,7 @@ def test_message_trace_basic(trace_instance, monkeypatch): assert gen_data.usage.total == 30 -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -636,7 +636,7 @@ def test_langfuse_trace_entity_with_list_dict_input(): assert data.input[0]["content"] == "hello" -def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch, caplog): +def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): # Setup trace info to trigger LLM node usage extraction trace_info = WorkflowTraceInfo( workflow_id="wf-1", diff --git a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py index 45e5894e4a..ee59acb17e 100644 --- a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py +++ b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py @@ -35,7 +35,7 @@ def langsmith_config(): @pytest.fixture -def trace_instance(langsmith_config, monkeypatch): +def trace_instance(langsmith_config, monkeypatch: pytest.MonkeyPatch): # Mock LangSmith client mock_client = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", lambda **kwargs: mock_client) @@ -44,7 +44,7 @@ def trace_instance(langsmith_config, monkeypatch): return instance -def test_init(langsmith_config, monkeypatch): +def test_init(langsmith_config, 
monkeypatch: pytest.MonkeyPatch): mock_client_class = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", mock_client_class) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -57,7 +57,7 @@ def test_init(langsmith_config, monkeypatch): assert instance.file_base_url == "http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -107,7 +107,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace(trace_instance, monkeypatch): +def test_workflow_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info workflow_data = MagicMock() workflow_data.created_at = _dt() @@ -223,7 +223,7 @@ def test_workflow_trace(trace_instance, monkeypatch): assert call_args[4].run_type == LangSmithRunType.retriever -def test_workflow_trace_no_start_time(trace_instance, monkeypatch): +def test_workflow_trace_no_start_time(trace_instance, monkeypatch: pytest.MonkeyPatch): workflow_data = MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) @@ -266,7 +266,7 @@ def test_workflow_trace_no_start_time(trace_instance, monkeypatch): assert trace_instance.add_run.called -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = MagicMock(spec=WorkflowTraceInfo) trace_info.trace_id = "trace-1" trace_info.message_id = None @@ -290,7 +290,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace(trace_instance, monkeypatch): +def test_message_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ 
-516,7 +516,7 @@ def test_update_run_error(trace_instance): trace_instance.update_run(update_data) -def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): workflow_data = MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) diff --git a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py index 46c9750a5d..324f894b25 100644 --- a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py +++ b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py @@ -614,7 +614,7 @@ class TestMessageTrace: span.set_status.assert_called_once() span.add_event.assert_called_once() - def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch: pytest.MonkeyPatch): span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" diff --git a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py index eefed3c78c..5daaa7132c 100644 --- a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py +++ b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py @@ -35,7 +35,7 @@ def opik_config(): @pytest.fixture -def trace_instance(opik_config, monkeypatch): +def trace_instance(opik_config, monkeypatch: pytest.MonkeyPatch): mock_client = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", lambda **kwargs: mock_client) @@ -65,7 +65,7 @@ def test_prepare_opik_uuid(): assert result is not None -def 
test_init(opik_config, monkeypatch): +def test_init(opik_config, monkeypatch: pytest.MonkeyPatch): mock_opik = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", mock_opik) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -82,7 +82,7 @@ def test_init(opik_config, monkeypatch): assert instance.project == opik_config.project -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -132,7 +132,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "fb05c7cd-6cec-4add-8a84-df03a408b4ce" WORKFLOW_RUN_ID = "33c67568-7a8a-450e-8916-a5f135baeaef" @@ -221,7 +221,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert trace_instance.add_span.call_count >= 1 -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "f0708b36-b1d7-42b3-a876-1d01b7d8f1a3" WORKFLOW_RUN_ID = "d42ec285-c2fd-4248-8866-5c9386b101ac" @@ -265,7 +265,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): trace_instance.add_trace.assert_called_once() -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="5745f1b8-f8e6-4859-8110-996acb6c8d6a", tenant_id="tenant-1", @@ -293,7 +293,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, 
monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability MESSAGE_DATA_ID = "e3a26712-8cac-4a25-94a4-a3bff21ee3ab" CONVERSATION_ID = "9d3f3751-7521-4c19-9307-20e3cf6789a3" @@ -340,7 +340,7 @@ def test_message_trace_basic(trace_instance, monkeypatch): trace_instance.add_span.assert_called_once() -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "85411059-79fb-4deb-a76c-c2e215f1b97e" message_data.from_account_id = "acc-1" @@ -614,7 +614,7 @@ def test_get_project_url_error(trace_instance): trace_instance.get_project_url() -def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): trace_info = WorkflowTraceInfo( workflow_id="86a52565-4a6b-4a1b-9bfd-98e4595e70de", tenant_id="66e8e918-472e-4b69-8051-12502c34fc07", diff --git a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py index 6028d0c550..30646815d8 100644 --- a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py +++ b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py @@ -267,14 +267,14 @@ class TestInit: with pytest.raises(ValueError, match="Weave login failed"): WeaveDataTrace(config) - def test_init_files_url_from_env(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_from_env(self, mock_wandb, mock_weave, monkeypatch: pytest.MonkeyPatch): """Test FILES_URL is read from environment.""" monkeypatch.setenv("FILES_URL", "http://files.example.com") config = _make_weave_config() instance = WeaveDataTrace(config) assert instance.file_base_url == 
"http://files.example.com" - def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch: pytest.MonkeyPatch): """Test FILES_URL defaults to http://127.0.0.1:5001.""" monkeypatch.delenv("FILES_URL", raising=False) config = _make_weave_config() @@ -302,7 +302,7 @@ class TestGetProjectUrl: url = instance.get_project_url() assert url == "https://wandb.ai/my-project" - def test_get_project_url_exception_raises(self, trace_instance, monkeypatch): + def test_get_project_url_exception_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Raises ValueError when exception occurs in get_project_url.""" monkeypatch.setattr(trace_instance, "entity", None) monkeypatch.setattr(trace_instance, "project_name", None) @@ -583,7 +583,7 @@ class TestFinishCall: class TestWorkflowTrace: - def _setup_repo(self, monkeypatch, nodes=None): + def _setup_repo(self, monkeypatch: pytest.MonkeyPatch, nodes=None): """Helper to patch session/repo dependencies.""" if nodes is None: nodes = [] @@ -599,7 +599,7 @@ class TestWorkflowTrace: monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) return repo - def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with no nodes and no message_id.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -614,7 +614,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 assert trace_instance.finish_call.call_count == 1 - def test_workflow_trace_with_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_with_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with message_id creates both message and workflow runs.""" 
self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -629,7 +629,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_workflow_trace_with_node_execution(self, trace_instance, monkeypatch): + def test_workflow_trace_with_node_execution(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace iterates node executions and creates node runs.""" node = _make_node( id="node-1", @@ -652,7 +652,7 @@ class TestWorkflowTrace: # workflow run + node run = 2 calls assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch): + def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """LLM node uses process_data prompts as inputs.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -680,7 +680,7 @@ class TestWorkflowTrace: # The key "messages" should be present (validator transforms the list) assert "messages" in node_run.inputs - def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch): + def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Non-LLM node uses node_execution.inputs directly.""" node = _make_node( node_type=BuiltinNodeTypes.TOOL, @@ -701,7 +701,7 @@ class TestWorkflowTrace: node_run = node_call_args[0][0] assert node_run.inputs.get("tool_input") == "val" - def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch): + def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Raises ValueError when app_id is missing from metadata.""" monkeypatch.setattr("dify_trace_weave.weave_trace.sessionmaker", lambda bind: MagicMock()) monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) @@ -714,7 
+714,7 @@ class TestWorkflowTrace: with pytest.raises(ValueError, match="No app_id found in trace_info metadata"): trace_instance.workflow_trace(trace_info) - def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, monkeypatch): + def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """start_time defaults to datetime.now() when None.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -727,7 +727,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 - def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch): + def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Node with created_at=None uses datetime.now().""" node = _make_node(created_at=None, elapsed_time=0.5) self._setup_repo(monkeypatch, nodes=[node]) @@ -740,7 +740,7 @@ class TestWorkflowTrace: trace_instance.workflow_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch): + def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Chat mode LLM node adds ls_provider and ls_model_name to attributes.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -765,7 +765,7 @@ class TestWorkflowTrace: assert node_run.attributes.get("ls_provider") == "openai" assert node_run.attributes.get("ls_model_name") == "gpt-4" - def test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch): + def test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Nodes are sorted by created_at before processing.""" node1 = _make_node(id="node-b", created_at=_dt() + timedelta(seconds=2)) node2 = _make_node(id="node-a", created_at=_dt()) 
@@ -799,7 +799,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) trace_instance.start_call.assert_not_called() - def test_basic_message_trace(self, trace_instance, monkeypatch): + def test_basic_message_trace(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace creates message run and llm child run.""" monkeypatch.setattr( "dify_trace_weave.weave_trace.db.session.get", @@ -816,7 +816,7 @@ class TestMessageTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_message_trace_with_file_data(self, trace_instance, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace appends file URL to file_list.""" file_data = MagicMock() file_data.url = "path/to/file.png" @@ -839,7 +839,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert "http://files.test/path/to/file.png" in message_run.file_list - def test_message_trace_with_end_user(self, trace_instance, monkeypatch): + def test_message_trace_with_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace looks up end user and sets end_user_id attribute.""" end_user = MagicMock() end_user.session_id = "session-xyz" @@ -862,7 +862,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert message_run.attributes.get("end_user_id") == "session-xyz" - def test_message_trace_no_end_user(self, trace_instance, monkeypatch): + def test_message_trace_no_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles when from_end_user_id is None.""" mock_db = MagicMock() mock_db.session.get.return_value = None @@ -880,7 +880,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, 
monkeypatch): + def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """trace_id falls back to message_id when trace_id is None.""" mock_db = MagicMock() mock_db.session.get.return_value = None @@ -895,7 +895,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert message_run.id == "msg-1" - def test_message_trace_file_list_none(self, trace_instance, monkeypatch): + def test_message_trace_file_list_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles file_list=None gracefully.""" mock_db = MagicMock() mock_db.session.get.return_value = None diff --git a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py index a907f918c3..37b2331f0f 100644 --- a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py +++ b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py @@ -20,7 +20,7 @@ def test_validate_distance_function_rejects_unsupported_values(): factory._validate_distance_function("dot_product") -def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch): +def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( id="dataset-1", @@ -45,7 +45,7 @@ def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch assert vector_cls.call_args.kwargs["collection_name"] == "existing_collection" -def test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch): +def test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( 
id="dataset-2", diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py index d1d471761d..2e8052b7dc 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py @@ -83,7 +83,7 @@ def test_get_type_is_analyticdb(): assert vector.get_type() == "analyticdb" -def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): +def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) @@ -109,7 +109,7 @@ def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): assert dataset.index_struct is not None -def test_factory_builds_sql_config_when_host_is_present(monkeypatch): +def test_factory_builds_sql_config_when_host_is_present(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace( id="dataset-2", index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py index d2d735ae3e..26bd385333 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py @@ -24,7 +24,7 @@ def _request_class(name: str): return _Request -def _install_openapi_stubs(monkeypatch): +def _install_openapi_stubs(monkeypatch: pytest.MonkeyPatch): gpdb_package = types.ModuleType("alibabacloud_gpdb20160503") gpdb_package.__path__ = [] gpdb_models = types.ModuleType("alibabacloud_gpdb20160503.models") @@ -130,7 +130,7 @@ def 
test_openapi_config_to_client_params(): assert params["read_timeout"] == 60000 -def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): +def test_init_creates_openapi_client_and_runs_initialize(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) initialize_mock = MagicMock() monkeypatch.setattr(openapi_module.AnalyticdbVectorOpenAPI, "_initialize", initialize_mock) @@ -145,7 +145,7 @@ def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): initialize_mock.assert_called_once_with() -def test_initialize_skips_when_cached(monkeypatch): +def test_initialize_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -164,7 +164,7 @@ def test_initialize_skips_when_cached(monkeypatch): vector._create_namespace_if_not_exists.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -184,7 +184,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_initialize_vector_database_calls_openapi_client(monkeypatch): +def test_initialize_vector_database_calls_openapi_client(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -199,7 +199,7 @@ def test_initialize_vector_database_calls_openapi_client(monkeypatch): assert request.manager_account_password == "password" -def test_create_namespace_creates_when_namespace_not_found(monkeypatch): +def test_create_namespace_creates_when_namespace_not_found(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ 
-211,7 +211,7 @@ def test_create_namespace_creates_when_namespace_not_found(monkeypatch): vector._client.create_namespace.assert_called_once() -def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): +def test_create_namespace_raises_on_unexpected_api_error(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -222,7 +222,7 @@ def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): vector._create_namespace_if_not_exists() -def test_create_namespace_noop_when_namespace_exists(monkeypatch): +def test_create_namespace_noop_when_namespace_exists(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -234,7 +234,7 @@ def test_create_namespace_noop_when_namespace_exists(monkeypatch): vector._client.create_namespace.assert_not_called() -def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): +def test_create_collection_if_not_exists_creates_when_missing(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -255,7 +255,7 @@ def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): +def test_create_collection_if_not_exists_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -274,7 +274,7 @@ def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): vector._client.create_collection.assert_not_called() -def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): +def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch: 
pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -293,7 +293,7 @@ def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): vector.create_collection_if_not_exists(embedding_dimension=512) -def test_openapi_add_delete_and_search_methods(monkeypatch): +def test_openapi_add_delete_and_search_methods(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -348,7 +348,7 @@ def test_openapi_add_delete_and_search_methods(monkeypatch): assert docs_by_text[0].page_content == "high" -def test_text_exists_returns_false_when_matches_empty(monkeypatch): +def test_text_exists_returns_false_when_matches_empty(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -361,7 +361,7 @@ def test_text_exists_returns_false_when_matches_empty(monkeypatch): assert vector.text_exists("missing-id") is False -def test_openapi_delete_success(monkeypatch): +def test_openapi_delete_success(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -372,7 +372,7 @@ def test_openapi_delete_success(monkeypatch): vector._client.delete_collection.assert_called_once() -def test_openapi_delete_propagates_errors(monkeypatch): +def test_openapi_delete_propagates_errors(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py index 49a2ae72d0..cd255b37cf 100644 
--- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py @@ -53,7 +53,7 @@ def test_sql_config_rejects_min_connection_greater_than_max_connection(): AnalyticdbVectorBySqlConfig.model_validate(values) -def test_initialize_skips_when_cache_exists(monkeypatch): +def test_initialize_skips_when_cache_exists(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -70,7 +70,7 @@ def test_initialize_skips_when_cache_exists(monkeypatch): vector._initialize_vector_database.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -88,7 +88,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): sql_module.redis_client.set.assert_called_once() -def test_create_connection_pool_uses_psycopg2_pool(monkeypatch): +def test_create_connection_pool_uses_psycopg2_pool(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -119,7 +119,7 @@ def test_get_cursor_context_manager_handles_connection_lifecycle(): pool.putconn.assert_called_once_with(connection) -def test_add_texts_inserts_only_documents_with_metadata(monkeypatch): +def test_add_texts_inserts_only_documents_with_metadata(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.table_name = "dify.collection" @@ -273,7 +273,7 @@ def test_delete_drops_table(): cursor.execute.assert_called_once() -def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch): +def 
test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch: pytest.MonkeyPatch): config = AnalyticdbVectorBySqlConfig(**_config_values()) created_pool = MagicMock() @@ -288,7 +288,7 @@ def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypat assert vector.pool is created_pool -def test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch): +def test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -326,7 +326,7 @@ def test_initialize_vector_database_handles_existing_database_and_search_config( assert any("CREATE SCHEMA IF NOT EXISTS dify" in call.args[0] for call in worker_cursor.execute.call_args_list) -def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch): +def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -353,7 +353,7 @@ def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(mon worker_connection.rollback.assert_called_once() -def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch): +def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" @@ -381,7 +381,7 @@ def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeyp sql_module.redis_client.set.assert_called_once() -def 
test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch): +def test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" diff --git a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py index 851c09f47a..f0dddee3b9 100644 --- a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py +++ b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py @@ -121,7 +121,7 @@ def _build_fake_pymochow_modules(): @pytest.fixture -def baidu_module(monkeypatch): +def baidu_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymochow_modules().items(): monkeypatch.setitem(sys.modules, name, module) import dify_vdb_baidu.baidu_vector as module @@ -254,7 +254,7 @@ def test_search_methods_delegate_to_database_table(baidu_module): assert vector._get_search_res.call_count == 2 -def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch): +def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch: pytest.MonkeyPatch): factory = baidu_module.BaiduVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) monkeypatch.setattr(baidu_module.Dataset, "gen_collection_name_by_id", lambda _id: "AUTO_COLLECTION") @@ -279,7 +279,7 @@ def test_factory_initializes_collection_name_and_index_struct(baidu_module, monk assert dataset.index_struct is not None -def test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch): +def test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch: pytest.MonkeyPatch): init_client = MagicMock(return_value="client") init_database = MagicMock(return_value="database") 
monkeypatch.setattr(baidu_module.BaiduVector, "_init_client", init_client) @@ -372,7 +372,7 @@ def test_get_search_result_handles_invalid_metadata_json(baidu_module): assert "document_id" not in docs[0].metadata -def test_init_client_constructs_configuration_and_client(baidu_module, monkeypatch): +def test_init_client_constructs_configuration_and_client(baidu_module, monkeypatch: pytest.MonkeyPatch): credentials = MagicMock(return_value="credentials") configuration = MagicMock(return_value="configuration") client_cls = MagicMock(return_value="client") @@ -411,7 +411,7 @@ def test_init_database_raises_for_unknown_create_database_error(baidu_module): vector._init_database() -def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch): +def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = SimpleNamespace( @@ -460,7 +460,7 @@ def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypat vector._wait_for_index_ready.assert_called_once_with(table, 3600) -def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch): +def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._db = MagicMock() @@ -493,7 +493,7 @@ def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypat vector._create_table(3) -def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch): +def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = 
SimpleNamespace( @@ -524,7 +524,9 @@ def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, vector._create_table(3) -def test_factory_uses_existing_collection_prefix_when_index_struct_exists(baidu_module, monkeypatch): +def test_factory_uses_existing_collection_prefix_when_index_struct_exists( + baidu_module, monkeypatch: pytest.MonkeyPatch +): factory = baidu_module.BaiduVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py index b209c9df96..f18f9a6561 100644 --- a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py +++ b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py @@ -44,7 +44,7 @@ def _build_fake_chroma_modules(): @pytest.fixture -def chroma_module(monkeypatch): +def chroma_module(monkeypatch: pytest.MonkeyPatch): fake_chroma = _build_fake_chroma_modules() monkeypatch.setitem(sys.modules, "chromadb", fake_chroma) import dify_vdb_chroma.chroma_vector as module @@ -73,7 +73,7 @@ def test_chroma_config_to_params_builds_expected_payload(chroma_module): assert params["settings"].chroma_client_auth_credentials == "credentials" -def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch): +def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -173,7 +173,7 @@ def test_search_by_full_text_returns_empty_list(chroma_module): assert vector.search_by_full_text("query") == [] -def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch): +def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch: pytest.MonkeyPatch): factory = chroma_module.ChromaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", 
index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py index a7473f1b91..4f8395e475 100644 --- a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py +++ b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py @@ -45,7 +45,7 @@ def _build_fake_clickzetta_module(): @pytest.fixture -def clickzetta_module(monkeypatch): +def clickzetta_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "clickzetta", _build_fake_clickzetta_module()) import dify_vdb_clickzetta.clickzetta_vector as module @@ -218,7 +218,7 @@ def test_search_by_like_returns_documents_with_default_score(clickzetta_module): assert docs[0].metadata["score"] == 0.5 -def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): +def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch: pytest.MonkeyPatch): factory = clickzetta_module.ClickzettaVectorFactory() dataset = SimpleNamespace(id="dataset-1") @@ -243,7 +243,7 @@ def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): assert vector_cls.call_args.kwargs["collection_name"] == "collection" -def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch): +def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch: pytest.MonkeyPatch): clickzetta_module.ClickzettaConnectionPool._instance = None monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) @@ -255,7 +255,7 @@ def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch assert "username:instance:service:workspace:cluster:dify" in key -def test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch): +def 
test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -274,7 +274,7 @@ def test_connection_pool_create_connection_retries_and_configures(clickzetta_mod pool._configure_connection.assert_called_once_with(connection) -def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch): +def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -318,7 +318,7 @@ def test_connection_pool_configure_connection_swallows_errors(clickzetta_module) monkeypatch.undo() -def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch): +def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -360,7 +360,7 @@ def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monk assert pool._shutdown is True -def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch): +def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False pool._cleanup_expired_connections = MagicMock(side_effect=lambda: setattr(pool, "_shutdown", True)) @@ -384,7 +384,7 @@ def 
test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module pool._cleanup_expired_connections.assert_called_once() -def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch): +def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() pool.get_connection.return_value = "conn" monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "get_instance", MagicMock(return_value=pool)) @@ -405,7 +405,7 @@ def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypat assert vector._ensure_connection() == "conn" -def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch): +def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch: pytest.MonkeyPatch): class _Thread: def __init__(self, target, daemon): self.target = target @@ -579,7 +579,7 @@ def test_create_inverted_index_branches(clickzetta_module): vector._create_inverted_index(cursor) -def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch): +def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch: pytest.MonkeyPatch): vector = clickzetta_module.ClickzettaVector.__new__(clickzetta_module.ClickzettaVector) vector._config = _config(clickzetta_module) vector._config.batch_size = 2 @@ -811,7 +811,7 @@ def test_clickzetta_pool_cleanup_and_shutdown_edge_paths(clickzetta_module): assert pool._shutdown is True -def test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch): +def test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False diff --git a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py 
b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py index 7e5c40b8f2..d474b566d3 100644 --- a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py +++ b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py @@ -150,7 +150,7 @@ def _build_fake_couchbase_modules(): @pytest.fixture -def couchbase_module(monkeypatch): +def couchbase_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_couchbase_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -194,7 +194,7 @@ def test_init_sets_cluster_handles(couchbase_module): vector._cluster.wait_until_ready.assert_called_once() -def test_create_and_create_collection_branches(couchbase_module, monkeypatch): +def test_create_and_create_collection_branches(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector.__new__(couchbase_module.CouchbaseVector) vector._collection_name = "collection_1" vector._client_config = _config(couchbase_module) @@ -319,7 +319,7 @@ def test_search_methods_and_format_metadata(couchbase_module): assert vector._format_metadata({"metadata.a": 1, "plain": 2}) == {"a": 1, "plain": 2} -def test_delete_collection_and_factory(couchbase_module, monkeypatch): +def test_delete_collection_and_factory(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector("collection_1", _config(couchbase_module)) scopes = [ SimpleNamespace(collections=[SimpleNamespace(name="other")]), diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py index f81ed6beea..91cc2e0fdb 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py @@ -28,7 +28,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture 
-def elasticsearch_ja_module(monkeypatch): +def elasticsearch_ja_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -39,7 +39,7 @@ def elasticsearch_ja_module(monkeypatch): return importlib.reload(ja_module) -def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): +def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -57,7 +57,7 @@ def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): elasticsearch_ja_module.redis_client.set.assert_not_called() -def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch): +def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -87,7 +87,7 @@ def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monk elasticsearch_ja_module.redis_client.set.assert_called_once() -def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch): +def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_ja_module.ElasticSearchJaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py index 48f1f6dc26..d54c105a0f 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py @@ -38,7 +38,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def 
elasticsearch_module(monkeypatch): +def elasticsearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -287,7 +287,7 @@ def test_search_by_vector_and_full_text(elasticsearch_module): assert "bool" in query -def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): +def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): elasticsearch_module.redis_client.set.assert_called_once() -def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch): +def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_module.ElasticSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py index f9a557ecce..8b197662e3 100644 --- a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py +++ b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py @@ -38,7 +38,7 @@ def _build_fake_hologres_modules(): @pytest.fixture -def hologres_module(monkeypatch): +def hologres_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_hologres_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -266,7 +266,7 @@ def test_delete_handles_existing_and_missing_tables(hologres_module): vector._client.drop_table.assert_called_once_with(vector.table_name) -def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch): +def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch: 
pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -281,7 +281,7 @@ def test_create_collection_returns_early_when_cache_hits(hologres_module, monkey hologres_module.redis_client.set.assert_not_called() -def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch): +def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -313,7 +313,7 @@ def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatc hologres_module.redis_client.set.assert_called_once() -def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch): +def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -331,7 +331,7 @@ def test_create_collection_raises_when_table_never_becomes_ready(hologres_module hologres_module.redis_client.set.assert_not_called() -def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch): +def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch: pytest.MonkeyPatch): factory = hologres_module.HologresVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py index ba3f14912b..a1617b6d43 100644 --- a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py +++ b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py @@ -29,7 +29,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def huawei_module(monkeypatch): +def huawei_module(monkeypatch: 
pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -155,7 +155,7 @@ def test_search_by_vector_and_full_text(huawei_module): assert docs[0].page_content == "text-hit" -def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch): +def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch: pytest.MonkeyPatch): class FakeDocument: def __init__(self, page_content, vector, metadata): self.page_content = page_content @@ -185,7 +185,7 @@ def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch assert docs == [] -def test_create_and_create_collection_paths(huawei_module, monkeypatch): +def test_create_and_create_collection_paths(huawei_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -218,7 +218,7 @@ def test_create_and_create_collection_paths(huawei_module, monkeypatch): huawei_module.redis_client.set.assert_called_once() -def test_huawei_factory_branches(huawei_module, monkeypatch): +def test_huawei_factory_branches(huawei_module, monkeypatch: pytest.MonkeyPatch): factory = huawei_module.HuaweiCloudVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py index 8c038e82b9..b4ea6ea6c1 100644 --- a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py +++ b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py @@ -23,7 +23,7 @@ def _build_fake_iris_module(): @pytest.fixture -def iris_module(monkeypatch): +def iris_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "iris", _build_fake_iris_module()) import dify_vdb_iris.iris_vector as module @@ -249,7 +249,7 @@ def test_iris_vector_init_get_cursor_and_create(iris_module): 
vector._create_collection.assert_called_once_with(2) -def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): +def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module)) @@ -297,7 +297,7 @@ def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): assert docs[0].metadata["score"] == pytest.approx(0.9) -def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): +def test_iris_vector_full_text_search_paths(iris_module, monkeypatch: pytest.MonkeyPatch): cfg = _config(iris_module, IRIS_TEXT_INDEX=True) with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", cfg) @@ -344,7 +344,7 @@ def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): assert vector_like.search_by_full_text("100%", top_k=1) == [] -def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch): +def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module, IRIS_TEXT_INDEX=True)) diff --git a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py index 238145c1d6..4a408d1b10 100644 --- a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py +++ b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py @@ -47,7 +47,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def lindorm_module(monkeypatch): +def lindorm_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -100,7 +100,7 @@ def 
test_to_opensearch_params_and_init(lindorm_module): assert vector_ugc._routing == "route" -def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch): +def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore( "collection", _config(lindorm_module), using_ugc=True, routing_value="route" ) @@ -301,7 +301,7 @@ def test_search_by_full_text_success_and_error(lindorm_module): vector.search_by_full_text("hello") -def test_create_collection_paths(lindorm_module, monkeypatch): +def test_create_collection_paths(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore("collection", _config(lindorm_module), using_ugc=False) with pytest.raises(ValueError, match="cannot be empty"): @@ -331,7 +331,7 @@ def test_create_collection_paths(lindorm_module, monkeypatch): vector._client.indices.create.assert_not_called() -def test_lindorm_factory_branches(lindorm_module, monkeypatch): +def test_lindorm_factory_branches(lindorm_module, monkeypatch: pytest.MonkeyPatch): factory = lindorm_module.LindormVectorStoreFactory() monkeypatch.setattr(lindorm_module.dify_config, "LINDORM_URL", "http://localhost:9200") diff --git a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py index c22f4304e5..762ec330b2 100644 --- a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py +++ b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py @@ -32,7 +32,7 @@ def _build_fake_mo_vector_modules(): @pytest.fixture -def matrixone_module(monkeypatch): +def matrixone_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_mo_vector_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -70,7 +70,7 @@ def test_matrixone_config_validation(matrixone_module, field, value, message): 
matrixone_module.MatrixoneConfig.model_validate(values) -def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch): +def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -86,7 +86,7 @@ def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, matrixone_module.redis_client.set.assert_called_once() -def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch): +def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -146,7 +146,7 @@ def test_get_type_and_create_delegate_to_add_texts(matrixone_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch): +def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -165,7 +165,7 @@ def test_get_client_handles_full_text_index_creation_error(matrixone_module, mon matrixone_module.redis_client.set.assert_not_called() -def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch): +def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch: pytest.MonkeyPatch): vector = matrixone_module.MatrixoneVector("collection_1", _valid_config(matrixone_module)) vector.client = MagicMock() monkeypatch.setattr(matrixone_module.uuid, "uuid4", lambda: "generated-uuid") @@ -224,7 +224,7 @@ def test_search_by_vector_builds_documents(matrixone_module): assert vector.client.query.call_args.kwargs["filter"] == {"document_id": {"$in": ["d-1"]}} -def 
test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch): +def test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch: pytest.MonkeyPatch): factory = matrixone_module.MatrixoneVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py index 36c0ed8f6f..730ff9f296 100644 --- a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py +++ b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py @@ -99,7 +99,7 @@ def _build_fake_pymilvus_modules(): @pytest.fixture -def milvus_module(monkeypatch): +def milvus_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymilvus_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -327,7 +327,7 @@ def test_process_search_results_and_search_methods(milvus_module): assert "document_id" in vector._client.search.call_args.kwargs["filter"] -def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch): +def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -351,7 +351,7 @@ def test_create_collection_cache_and_existing_collection(milvus_module, monkeypa milvus_module.redis_client.set.assert_called() -def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch): +def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -385,7 +385,7 @@ def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch) assert call_kwargs["consistency_level"] == "Session" -def test_factory_initializes_milvus_vector(milvus_module, monkeypatch): +def 
test_factory_initializes_milvus_vector(milvus_module, monkeypatch: pytest.MonkeyPatch): factory = milvus_module.MilvusVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py index 228ea92639..900c75fdab 100644 --- a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py +++ b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py @@ -38,7 +38,7 @@ def _build_fake_clickhouse_connect_module(): @pytest.fixture -def myscale_module(monkeypatch): +def myscale_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_clickhouse_connect_module() monkeypatch.setitem(sys.modules, "clickhouse_connect", fake_module) @@ -90,7 +90,7 @@ def test_delete_by_ids_short_circuits_on_empty_list(myscale_module): vector._client.command.assert_not_called() -def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch): +def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch: pytest.MonkeyPatch): factory = myscale_module.MyScaleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -160,7 +160,7 @@ def test_create_collection_builds_expected_sql(myscale_module): assert "INDEX text_idx text TYPE fts('tokenizer=unicode')" in sql -def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch): +def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch: pytest.MonkeyPatch): vector = myscale_module.MyScaleVector("collection_1", _config(myscale_module)) monkeypatch.setattr(myscale_module.uuid, "uuid4", lambda: "generated-uuid") docs = [ diff --git a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py index 31f9ff3e56..36393cc486 100644 --- 
a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py +++ b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py @@ -53,7 +53,7 @@ def _build_fake_pyobvector_module(): @pytest.fixture -def oceanbase_module(monkeypatch): +def oceanbase_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "pyobvector", _build_fake_pyobvector_module()) import dify_vdb_oceanbase.oceanbase_vector as module @@ -208,7 +208,7 @@ def test_create_delegates_to_collection_and_insert(oceanbase_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch): +def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -234,7 +234,7 @@ def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_mod vector.delete.assert_not_called() -def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch): +def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -271,7 +271,7 @@ def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, mo oceanbase_module.redis_client.set.assert_called_once() -def test_create_collection_error_paths(oceanbase_module, monkeypatch): +def test_create_collection_error_paths(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -308,7 +308,7 @@ def test_create_collection_error_paths(oceanbase_module, monkeypatch): vector._create_collection() -def test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch): +def 
test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -517,7 +517,7 @@ def test_delete_success_and_exception(oceanbase_module): vector.delete() -def test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch): +def test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch: pytest.MonkeyPatch): factory = oceanbase_module.OceanBaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py index 09abd625fc..57c9b14d9f 100644 --- a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py +++ b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def opengauss_module(monkeypatch): +def opengauss_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -88,7 +88,7 @@ def test_opengauss_config_validation_rejects_min_greater_than_max(opengauss_modu opengauss_module.OpenGaussConfig.model_validate(values) -def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): +def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -99,7 +99,7 @@ def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): assert vector.pool is pool -def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): +def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() 
monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -126,7 +126,7 @@ def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch): +def test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -158,7 +158,7 @@ def test_search_by_vector_validates_top_k(opengauss_module): vector.search_by_vector([0.1, 0.2], top_k=0) -def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch): +def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -200,7 +200,7 @@ def test_create_calls_collection_insert_and_index(opengauss_module): vector._create_index.assert_called_once_with(2) -def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch): +def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -220,7 +220,7 @@ def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_not_called() -def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch): +def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, 
"SimpleConnectionPool", MagicMock(return_value=pool)) @@ -245,7 +245,7 @@ def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, m assert any("embedding_cosine_embedding_collection_1_idx" in query for query in sql) -def test_add_texts_uses_execute_values(opengauss_module, monkeypatch): +def test_add_texts_uses_execute_values(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -342,7 +342,7 @@ def test_search_by_full_text_validates_top_k(opengauss_module): vector.search_by_full_text("query", top_k=0) -def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): +def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) lock = MagicMock() @@ -370,7 +370,7 @@ def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch): +def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch: pytest.MonkeyPatch): factory = opengauss_module.OpenGaussFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py index f2ed7cb6fb..b2b004a4de 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py @@ -59,7 +59,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def 
opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -95,7 +95,7 @@ class TestOpenSearchConfig: assert params["connection_class"].__name__ == "Urllib3HttpConnection" assert params["http_auth"] == ("admin", "password") - def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch): + def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch: pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py index 1c2921f85b..80bf20e820 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py @@ -58,7 +58,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -116,7 +116,7 @@ def test_config_validation_for_aws_auth_and_https_fields(opensearch_module): opensearch_module.OpenSearchConfig.model_validate(values) -def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch): +def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch: pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" @@ -167,7 +167,7 @@ def test_init_and_create_delegate_calls(opensearch_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch): +def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch: pytest.MonkeyPatch): vector = 
opensearch_module.OpenSearchVector("Collection_1", _config(opensearch_module, aws_service="es")) docs = [ Document(page_content="a", metadata={"doc_id": "1"}), @@ -308,7 +308,7 @@ def test_search_by_full_text_and_filters(opensearch_module): assert query["query"]["bool"]["filter"] == [{"terms": {"metadata.document_id": ["d-1"]}}] -def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch): +def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch) opensearch_module.redis_client.set.assert_called() -def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch): +def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch: pytest.MonkeyPatch): factory = opensearch_module.OpenSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py index 678cf876b0..46027c7e44 100644 --- a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py +++ b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py @@ -51,7 +51,7 @@ def _connection_with_cursor(cursor): @pytest.fixture -def oracle_module(monkeypatch): +def oracle_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_oracle_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -94,7 +94,7 @@ def test_oracle_config_validation_autonomous_requirements(oracle_module): ) -def test_init_and_get_type(oracle_module, monkeypatch): +def test_init_and_get_type(oracle_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(oracle_module.oracledb, "create_pool", 
MagicMock(return_value=pool)) vector = oracle_module.OracleVector("collection_1", _config(oracle_module)) @@ -139,7 +139,7 @@ def test_numpy_converters_and_type_handlers(oracle_module): assert out_float64.dtype == numpy.float64 -def test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch): +def test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): connect = MagicMock(return_value="connection") monkeypatch.setattr(oracle_module.oracledb, "connect", connect) @@ -173,7 +173,7 @@ def test_create_delegates_collection_and_insert(oracle_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch): +def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector.input_type_handler = MagicMock() @@ -279,7 +279,7 @@ def _fake_nltk_module(*, missing_data=False): return nltk, nltk_corpus -def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch): +def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" @@ -305,7 +305,7 @@ def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatc assert "doc_id_0" in en_params -def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch): +def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector._get_connection = MagicMock() @@ -320,7 +320,7 @@ def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, 
monkeyp vector.search_by_full_text("english query") -def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): +def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -346,7 +346,9 @@ def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): oracle_module.redis_client.set.assert_called_once() -def test_oracle_factory_init_vector_uses_existing_or_generated_collection(oracle_module, monkeypatch): +def test_oracle_factory_init_vector_uses_existing_or_generated_collection( + oracle_module, monkeypatch: pytest.MonkeyPatch +): factory = oracle_module.OracleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py index c3291f7f12..1841e88139 100644 --- a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py +++ b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py @@ -79,7 +79,7 @@ def _patch_both(monkeypatch, module, calls, execute_results=None): @pytest.fixture -def pgvecto_module(monkeypatch): +def pgvecto_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pgvecto_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -126,7 +126,7 @@ def test_collection_base_has_expected_annotations(pgvecto_module): assert {"id", "text", "meta", "vector"} <= set(annotations) -def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): +def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -145,7 +145,7 @@ def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): 
vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): +def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -169,7 +169,7 @@ def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): module.redis_client.set.assert_called() -def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): +def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] runtime_calls = [] @@ -241,7 +241,7 @@ def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): assert any("DROP TABLE IF EXISTS collection_1" in str(args[0]) for args, _ in runtime_calls) -def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): +def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -313,7 +313,7 @@ def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): assert vector.search_by_full_text("hello") == [] -def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch): +def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module factory = module.PGVectoRSFactory() dataset_with_index = SimpleNamespace( diff --git a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py index 99a6e00c16..38e472df63 100644 --- a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py +++ b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py @@ -336,7 +336,7 
@@ def test_create_delegates_collection_creation_and_insert(): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch): +def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name = "embedding_collection_1" @@ -387,7 +387,7 @@ def test_text_get_and_delete_methods(): assert any("DROP TABLE IF EXISTS embedding_collection_1" in sql for sql in executed_sql) -def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch): +def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name = "embedding_collection_1" cursor = MagicMock() @@ -464,7 +464,7 @@ def test_search_by_full_text_branches_for_bigm_and_standard(): assert "bigm_similarity" in cursor.execute.call_args_list[1].args[0] -def test_pgvector_factory_initializes_expected_collection_name(monkeypatch): +def test_pgvector_factory_initializes_expected_collection_name(monkeypatch: pytest.MonkeyPatch): factory = pgvector_module.PGVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py index 0ed5491fbe..89ee0a47f1 100644 --- a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py +++ b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py @@ -121,7 +121,7 @@ def _build_fake_qdrant_modules(): @pytest.fixture -def qdrant_module(monkeypatch): +def qdrant_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_qdrant_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -170,7 +170,7 @@ def test_init_and_basic_behaviour(qdrant_module): vector.add_texts.assert_called_once() -def 
test_create_collection_and_add_texts(qdrant_module, monkeypatch): +def test_create_collection_and_add_texts(qdrant_module, monkeypatch: pytest.MonkeyPatch): vector = qdrant_module.QdrantVector("collection_1", "group-1", _config(qdrant_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -288,7 +288,7 @@ def test_search_and_helper_methods(qdrant_module): assert doc.page_content == "doc" -def test_qdrant_factory_paths(qdrant_module, monkeypatch): +def test_qdrant_factory_paths(qdrant_module, monkeypatch: pytest.MonkeyPatch): factory = qdrant_module.QdrantVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py index f97ad1400a..c5f3a9f847 100644 --- a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py +++ b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py @@ -59,7 +59,7 @@ def _patch_both(monkeypatch, module, session): @pytest.fixture -def relyt_module(monkeypatch): +def relyt_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_relyt_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -97,7 +97,7 @@ def test_relyt_config_validation(relyt_module, field, value, message): relyt_module.RelytConfig.model_validate(values) -def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): +def test_init_get_type_and_create_delegate(relyt_module, monkeypatch: pytest.MonkeyPatch): engine = MagicMock() monkeypatch.setattr(relyt_module, "create_engine", MagicMock(return_value=engine)) vector = relyt_module.RelytVector("collection_1", _config(relyt_module), group_id="group-1") @@ -114,7 +114,7 @@ def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): +def 
test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -142,7 +142,7 @@ def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): relyt_module.redis_client.set.assert_called_once() -def test_add_texts_and_metadata_queries(relyt_module, monkeypatch): +def test_add_texts_and_metadata_queries(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector._group_id = "group-1" @@ -212,7 +212,7 @@ def test_delete_by_metadata_field_calls_delete_by_uuids(relyt_module): # 3. delete_by_ids translates to uuids -def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): +def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -225,7 +225,7 @@ def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): # 4. text_exists True -def test_text_exists_true(relyt_module, monkeypatch): +def test_text_exists_true(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -236,7 +236,7 @@ def test_text_exists_true(relyt_module, monkeypatch): # 5. text_exists False -def test_text_exists_false(relyt_module, monkeypatch): +def test_text_exists_false(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -284,7 +284,7 @@ def test_search_by_vector_filters_by_score_and_ids(relyt_module): # 8. 
delete commits session -def test_delete_drops_table(relyt_module, monkeypatch): +def test_delete_drops_table(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -295,7 +295,7 @@ def test_delete_drops_table(relyt_module, monkeypatch): session.execute.assert_called_once() -def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch): +def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch: pytest.MonkeyPatch): factory = relyt_module.RelytVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py index 62a11e0445..49d4b160cf 100644 --- a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py +++ b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py @@ -77,7 +77,7 @@ def _build_fake_tablestore_module(): @pytest.fixture -def tablestore_module(monkeypatch): +def tablestore_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_tablestore_module() monkeypatch.setitem(sys.modules, "tablestore", fake_module) @@ -177,7 +177,7 @@ def test_get_by_ids_text_exists_delete_and_wrappers(tablestore_module): vector._delete_table_if_exist.assert_called_once() -def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch): +def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch: pytest.MonkeyPatch): vector = tablestore_module.TableStoreVector("collection_1", _config(tablestore_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -289,7 +289,7 @@ def test_write_row_and_search_helpers(tablestore_module): assert "score" not in docs[0].metadata -def 
test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch): +def test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch: pytest.MonkeyPatch): factory = tablestore_module.TableStoreVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py index 299e40ee1e..e1fe227a29 100644 --- a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py +++ b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py @@ -136,7 +136,7 @@ def _build_fake_tencent_modules(): @pytest.fixture -def tencent_module(monkeypatch): +def tencent_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_tencent_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -187,7 +187,7 @@ def test_config_and_init_paths(tencent_module): assert vector._enable_hybrid_search is False -def test_create_collection_branches(tencent_module, monkeypatch): +def test_create_collection_branches(tencent_module, monkeypatch: pytest.MonkeyPatch): vector = tencent_module.TencentVector("collection_1", _config(tencent_module)) lock = MagicMock() @@ -279,7 +279,7 @@ def test_create_add_delete_and_search_behaviour(tencent_module): vector._client.drop_collection.assert_called_once() -def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch): +def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch: pytest.MonkeyPatch): factory = tencent_module.TencentVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tidb-on-qdrant/src/dify_vdb_tidb_on_qdrant/tidb_service.py b/api/providers/vdb/vdb-tidb-on-qdrant/src/dify_vdb_tidb_on_qdrant/tidb_service.py index ece061db67..6283dbb986 100644 --- 
a/api/providers/vdb/vdb-tidb-on-qdrant/src/dify_vdb_tidb_on_qdrant/tidb_service.py +++ b/api/providers/vdb/vdb-tidb-on-qdrant/src/dify_vdb_tidb_on_qdrant/tidb_service.py @@ -246,8 +246,18 @@ class TidbService: userPrefix = item["userPrefix"] if state == "ACTIVE" and len(userPrefix) > 0: cluster_info = tidb_serverless_list_map[item["clusterId"]] - cluster_info.status = TidbAuthBindingStatus.ACTIVE cluster_info.account = f"{userPrefix}.root" + if not cluster_info.qdrant_endpoint: + cluster_info.qdrant_endpoint = TidbService.extract_qdrant_endpoint( + item + ) or TidbService.fetch_qdrant_endpoint(api_url, public_key, private_key, item["clusterId"]) + if cluster_info.qdrant_endpoint: + cluster_info.status = TidbAuthBindingStatus.ACTIVE + else: + logger.warning( + "Cluster %s is ACTIVE but qdrant endpoint is not ready; will retry later", + item["clusterId"], + ) db.session.add(cluster_info) db.session.commit() else: diff --git a/api/providers/vdb/vdb-tidb-on-qdrant/tests/unit_tests/test_tidb_service.py b/api/providers/vdb/vdb-tidb-on-qdrant/tests/unit_tests/test_tidb_service.py index c1ffbacbbc..20a42f6cc3 100644 --- a/api/providers/vdb/vdb-tidb-on-qdrant/tests/unit_tests/test_tidb_service.py +++ b/api/providers/vdb/vdb-tidb-on-qdrant/tests/unit_tests/test_tidb_service.py @@ -1,8 +1,11 @@ +from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest from dify_vdb_tidb_on_qdrant.tidb_service import TidbService +from models.enums import TidbAuthBindingStatus + class TestExtractQdrantEndpoint: """Unit tests for TidbService.extract_qdrant_endpoint.""" @@ -216,3 +219,86 @@ class TestBatchCreateEdgeCases: private_key="priv", region="us-east-1", ) + + +class TestBatchUpdateTidbServerlessClusterStatus: + """Verify that status updates only expose clusters after qdrant endpoint is ready.""" + + @patch("dify_vdb_tidb_on_qdrant.tidb_service.db") + @patch("dify_vdb_tidb_on_qdrant.tidb_service._tidb_http_client") + def 
test_sets_active_when_batch_response_contains_endpoint(self, mock_http, mock_db): + binding = SimpleNamespace( + cluster_id="c-1", + status=TidbAuthBindingStatus.CREATING, + account="root", + qdrant_endpoint=None, + ) + mock_http.get.return_value = MagicMock( + status_code=200, + json=lambda: { + "clusters": [ + { + "clusterId": "c-1", + "state": "ACTIVE", + "userPrefix": "pfx", + "endpoints": {"public": {"host": "gw.tidbcloud.com"}}, + } + ] + }, + ) + + TidbService.batch_update_tidb_serverless_cluster_status([binding], "proj", "url", "iam", "pub", "priv") + + assert binding.account == "pfx.root" + assert binding.qdrant_endpoint == "https://qdrant-gw.tidbcloud.com" + assert binding.status == TidbAuthBindingStatus.ACTIVE + mock_db.session.add.assert_called_once_with(binding) + mock_db.session.commit.assert_called_once() + + @patch.object(TidbService, "fetch_qdrant_endpoint", return_value="https://qdrant-gw.tidbcloud.com") + @patch("dify_vdb_tidb_on_qdrant.tidb_service.db") + @patch("dify_vdb_tidb_on_qdrant.tidb_service._tidb_http_client") + def test_fetches_endpoint_when_batch_response_omits_it(self, mock_http, mock_db, mock_fetch_endpoint): + binding = SimpleNamespace( + cluster_id="c-1", + status=TidbAuthBindingStatus.CREATING, + account="root", + qdrant_endpoint=None, + ) + mock_http.get.return_value = MagicMock( + status_code=200, + json=lambda: {"clusters": [{"clusterId": "c-1", "state": "ACTIVE", "userPrefix": "pfx", "endpoints": {}}]}, + ) + + TidbService.batch_update_tidb_serverless_cluster_status([binding], "proj", "url", "iam", "pub", "priv") + + assert binding.account == "pfx.root" + assert binding.qdrant_endpoint == "https://qdrant-gw.tidbcloud.com" + assert binding.status == TidbAuthBindingStatus.ACTIVE + mock_fetch_endpoint.assert_called_once_with("url", "pub", "priv", "c-1") + mock_db.session.add.assert_called_once_with(binding) + mock_db.session.commit.assert_called_once() + + @patch.object(TidbService, "fetch_qdrant_endpoint", return_value=None) + 
@patch("dify_vdb_tidb_on_qdrant.tidb_service.db") + @patch("dify_vdb_tidb_on_qdrant.tidb_service._tidb_http_client") + def test_keeps_creating_when_endpoint_is_not_ready(self, mock_http, mock_db, mock_fetch_endpoint): + binding = SimpleNamespace( + cluster_id="c-1", + status=TidbAuthBindingStatus.CREATING, + account="root", + qdrant_endpoint=None, + ) + mock_http.get.return_value = MagicMock( + status_code=200, + json=lambda: {"clusters": [{"clusterId": "c-1", "state": "ACTIVE", "userPrefix": "pfx", "endpoints": {}}]}, + ) + + TidbService.batch_update_tidb_serverless_cluster_status([binding], "proj", "url", "iam", "pub", "priv") + + assert binding.account == "pfx.root" + assert binding.qdrant_endpoint is None + assert binding.status == TidbAuthBindingStatus.CREATING + mock_fetch_endpoint.assert_called_once_with("url", "pub", "priv", "c-1") + mock_db.session.add.assert_called_once_with(binding) + mock_db.session.commit.assert_called_once() diff --git a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py index bdbed2f740..ed03cbee88 100644 --- a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py +++ b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py @@ -46,7 +46,7 @@ def test_tidb_config_validation(tidb_module, field, value, message): tidb_module.TiDBVectorConfig.model_validate(values) -def test_init_get_type_and_distance_func(tidb_module, monkeypatch): +def test_init_get_type_and_distance_func(tidb_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(tidb_module, "create_engine", MagicMock(return_value="engine")) vector = tidb_module.TiDBVector("collection_1", _config(tidb_module), distance_func="L2") @@ -63,7 +63,7 @@ def test_init_get_type_and_distance_func(tidb_module, monkeypatch): assert vector._get_distance_func() == "VEC_COSINE_DISTANCE" -def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch): 
+def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch: pytest.MonkeyPatch): fake_tidb_vector = types.ModuleType("tidb_vector") fake_tidb_sqlalchemy = types.ModuleType("tidb_vector.sqlalchemy") @@ -107,7 +107,7 @@ def test_create_calls_collection_and_add_texts(tidb_module): assert vector._dimension == 2 -def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): +def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -127,7 +127,7 @@ def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): tidb_module.redis_client.set.assert_not_called() -def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch): +def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -160,7 +160,7 @@ def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monke tidb_module.redis_client.set.assert_called_once() -def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): +def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch: pytest.MonkeyPatch): class _InsertStmt: def __init__(self, table): self.table = table @@ -198,7 +198,7 @@ def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): @pytest.fixture -def tidb_vector_with_session(tidb_module, monkeypatch): +def tidb_vector_with_session(tidb_module, monkeypatch: pytest.MonkeyPatch): vector = tidb_module.TiDBVector.__new__(tidb_module.TiDBVector) vector._collection_name = "collection_1" vector._engine = MagicMock() @@ -354,7 +354,7 @@ def test_delete_by_metadata_field_does_nothing_when_no_ids(tidb_module): # Test search_by_vector filters and scores -def test_search_by_vector_filters_and_scores(tidb_module, 
monkeypatch): +def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = [ ('{"doc_id":"id-1","document_id":"d-1"}', "text-1", 0.2), @@ -392,7 +392,7 @@ def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch): # Test delete drops table -def test_delete_drops_table(tidb_module, monkeypatch): +def test_delete_drops_table(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = None @@ -413,7 +413,7 @@ def test_delete_drops_table(tidb_module, monkeypatch): assert "DROP TABLE IF EXISTS collection_1" in drop_sql -def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch): +def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch: pytest.MonkeyPatch): factory = tidb_module.TiDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py index a884275c89..55d27ad264 100644 --- a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py +++ b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py @@ -36,7 +36,7 @@ def _build_fake_upstash_module(): @pytest.fixture -def upstash_module(monkeypatch): +def upstash_module(monkeypatch: pytest.MonkeyPatch): # Remove patched modules if present for modname in ["upstash_vector", "dify_vdb_upstash.upstash_vector"]: if modname in sys.modules: @@ -65,7 +65,7 @@ def test_upstash_config_validation(upstash_module, field, value, message): upstash_module.UpstashVectorConfig.model_validate(values) -def test_init_get_type_and_dimension(upstash_module, monkeypatch): +def test_init_get_type_and_dimension(upstash_module, monkeypatch: pytest.MonkeyPatch): vector = upstash_module.UpstashVector("collection_1", _config(upstash_module)) assert vector.get_type() == 
upstash_module.VectorType.UPSTASH @@ -162,7 +162,7 @@ def test_search_by_vector_filter_threshold_and_delete(upstash_module): vector.index.reset.assert_called_once() -def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch): +def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch: pytest.MonkeyPatch): factory = upstash_module.UpstashVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py index 4dfb956c00..32f47c67ed 100644 --- a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py +++ b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def vastbase_module(monkeypatch): +def vastbase_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -93,7 +93,7 @@ def test_vastbase_config_rejects_invalid_connection_window(vastbase_module): ) -def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): +def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(vastbase_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -114,7 +114,7 @@ def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): pool.putconn.assert_called_once_with(conn) -def test_create_and_add_texts(vastbase_module, monkeypatch): +def test_create_and_add_texts(vastbase_module, monkeypatch: pytest.MonkeyPatch): vector = vastbase_module.VastbaseVector.__new__(vastbase_module.VastbaseVector) vector.table_name = "embedding_collection_1" vector._create_collection = MagicMock() @@ -205,7 +205,7 @@ def 
test_search_by_vector_and_full_text(vastbase_module): assert full_docs[0].page_content == "full-text" -def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch): +def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -240,7 +240,7 @@ def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeyp vastbase_module.redis_client.set.assert_called() -def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch): +def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch: pytest.MonkeyPatch): factory = vastbase_module.VastbaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py index 544b8163be..6559ad97d2 100644 --- a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py +++ b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py @@ -79,7 +79,7 @@ def _build_fake_vikingdb_modules(): @pytest.fixture -def vikingdb_module(monkeypatch): +def vikingdb_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_vikingdb_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -117,7 +117,7 @@ def test_init_get_type_and_has_checks(vikingdb_module): assert vector._has_index() is False -def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch): +def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -253,7 +253,7 @@ def test_delete_drops_index_and_collection_when_present(vikingdb_module): 
vector._client.drop_collection.assert_not_called() -def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch): +def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch: pytest.MonkeyPatch): factory = vikingdb_module.VikingDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -293,7 +293,9 @@ def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, mo ("VIKINGDB_SCHEME", "VIKINGDB_SCHEME should not be None"), ], ) -def test_vikingdb_factory_raises_when_required_config_missing(vikingdb_module, monkeypatch, field, message): +def test_vikingdb_factory_raises_when_required_config_missing( + vikingdb_module, monkeypatch: pytest.MonkeyPatch, field, message +): factory = vikingdb_module.VikingDBVectorFactory() dataset = SimpleNamespace( id="dataset-1", index_struct_dict={"vector_store": {"class_prefix": "existing"}}, index_struct=None diff --git a/api/pyproject.toml b/api/pyproject.toml index 8e690f58a6..ed03dc99fe 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -22,7 +22,6 @@ dependencies = [ "redis[hiredis]>=7.4.0", "sendgrid>=6.12.5", "sseclient-py>=1.8.0", - # Stable: production-proven, cap below the next major "aliyun-log-python-sdk>=0.9.44,<1.0.0", "azure-identity>=1.25.3,<2.0.0", @@ -42,7 +41,6 @@ dependencies = [ "opentelemetry-propagator-b3>=1.41.1,<2.0.0", "readabilipy>=0.3.0,<1.0.0", "resend>=2.27.0,<3.0.0", - # Emerging: newer and fast-moving, use compatible pins "fastopenapi[flask]~=0.7.0", "graphon~=0.2.2", @@ -98,11 +96,13 @@ dify-trace-mlflow = { workspace = true } dify-trace-opik = { workspace = true } dify-trace-tencent = { workspace = true } dify-trace-weave = { workspace = true } +graphon = { git = "https://github.com/QuantumGhost/graphon", branch = "hitl-form-dev" } [tool.uv] default-groups = ["storage", "tools", "vdb-all", "trace-all"] package = false override-dependencies = [ + "litellm>=1.83.7", "pyarrow>=18.0.0", ] @@ -174,7 
+174,7 @@ dev = [ # "locust>=2.40.4", # Temporarily removed due to compatibility issues. Uncomment when resolved. "pytest-timeout>=2.4.0", "pytest-xdist>=3.8.0", - "pyrefly>=0.62.0", + "pyrefly>=0.64.0", "xinference-client>=2.7.0", ] diff --git a/api/repositories/sqlalchemy_execution_extra_content_repository.py b/api/repositories/sqlalchemy_execution_extra_content_repository.py index 67f8795d3f..f695fd39d9 100644 --- a/api/repositories/sqlalchemy_execution_extra_content_repository.py +++ b/api/repositories/sqlalchemy_execution_extra_content_repository.py @@ -117,7 +117,7 @@ class SQLAlchemyExecutionExtraContentRepository(ExecutionExtraContentRepository) definition_payload["expiration_time"] = form.expiration_time form_definition = FormDefinition.model_validate(definition_payload) except ValueError: - logger.warning("Failed to load form definition for HumanInputContent(id=%s)", model.id) + logger.warning("Failed to load form definition for HumanInputContent(id=%s)", model.id, exc_info=True) return None node_title = form_definition.node_title or form.node_id display_in_ui = bool(form_definition.display_in_ui) @@ -125,22 +125,20 @@ class SQLAlchemyExecutionExtraContentRepository(ExecutionExtraContentRepository) submitted = form.submitted_at is not None or form.status == HumanInputFormStatus.SUBMITTED if not submitted: form_token = self._resolve_form_token(recipients_by_form_id.get(form.id, [])) - return HumanInputContentDomainModel( - workflow_run_id=model.workflow_run_id, - submitted=False, - form_definition=HumanInputFormDefinition( - form_id=form.id, - node_id=form.node_id, - node_title=node_title, - form_content=form.rendered_content, - inputs=form_definition.inputs, - actions=form_definition.user_actions, - display_in_ui=display_in_ui, - form_token=form_token, - resolved_default_values=form_definition.default_values, - expiration_time=int(form.expiration_time.timestamp()), - ), - ) + else: + form_token = None + form_definition_domain_model = 
HumanInputFormDefinition( + form_id=form.id, + node_id=form.node_id, + node_title=node_title, + form_content=form.rendered_content, + inputs=form_definition.inputs, + actions=form_definition.user_actions, + display_in_ui=display_in_ui, + form_token=form_token, + resolved_default_values=form_definition.default_values, + expiration_time=int(form.expiration_time.timestamp()), + ) selected_action_id = form.selected_action_id if not selected_action_id: @@ -164,17 +162,20 @@ class SQLAlchemyExecutionExtraContentRepository(ExecutionExtraContentRepository) form.rendered_content, submitted_data, _extract_output_field_names(form_definition.form_content), + form_definition.inputs, ) return HumanInputContentDomainModel( workflow_run_id=model.workflow_run_id, - submitted=True, + submitted=submitted, + form_definition=form_definition_domain_model, form_submission_data=HumanInputFormSubmissionData( node_id=form.node_id, node_title=node_title, rendered_content=rendered_content, action_id=selected_action_id, action_text=action_text, + submitted_data=submitted_data, ), ) diff --git a/api/services/file_service.py b/api/services/file_service.py index f60afe2f19..b683a2f3d4 100644 --- a/api/services/file_service.py +++ b/api/services/file_service.py @@ -107,15 +107,14 @@ class FileService: hash=hashlib.sha3_256(content).hexdigest(), source_url=source_url, ) - # The `UploadFile` ID is generated within its constructor, so flushing to retrieve the ID is unnecessary. - # We can directly generate the `source_url` here before committing. 
- if not upload_file.source_url: - upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) with self._session_maker(expire_on_commit=False) as session: session.add(upload_file) session.commit() + if not upload_file.source_url: + upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) + return upload_file @staticmethod diff --git a/api/services/human_input_file_upload_service.py b/api/services/human_input_file_upload_service.py new file mode 100644 index 0000000000..a9945d4478 --- /dev/null +++ b/api/services/human_input_file_upload_service.py @@ -0,0 +1,244 @@ +from __future__ import annotations + +import secrets +from dataclasses import dataclass +from datetime import datetime, timedelta + +from sqlalchemy import Engine, select +from sqlalchemy.orm import Session, selectinload, sessionmaker + +from configs import dify_config +from graphon.nodes.human_input.enums import HumanInputFormKind, HumanInputFormStatus +from libs.datetime_utils import ensure_naive_utc, naive_utc_now +from models.account import Account, Tenant +from models.enums import CreatorUserRole +from models.human_input import ( + HumanInputForm, + HumanInputFormRecipient, + HumanInputFormUploadFile, + HumanInputFormUploadToken, +) +from models.model import App, EndUser +from repositories.api_workflow_run_repository import APIWorkflowRunRepository +from repositories.factory import DifyAPIRepositoryFactory +from services.human_input_service import FormExpiredError, FormNotFoundError, FormSubmittedError + +HITL_UPLOAD_TOKEN_PREFIX = "hitl_upload_" +_TOKEN_RANDOM_BYTES = 32 +_TOKEN_GENERATION_ATTEMPTS = 10 + + +@dataclass(frozen=True) +class HumanInputUploadToken: + upload_token: str + expires_at: datetime + + +@dataclass(frozen=True) +class HumanInputUploadContext: + tenant_id: str + app_id: str + form_id: str + recipient_id: str + upload_token_id: str + owner: Account | EndUser + + +class InvalidUploadTokenError(Exception): + pass + + 
+class HumanInputFileUploadService: + """Coordinates HITL upload tokens, workflow-run owners, and form-file links. + + Standalone HITL uploads must be owned by the original workflow/chatflow + initiator so that resume-time file restoration continues to flow through the + normal file access checks. Delivery-test forms have no workflow run, so their + uploads are scoped to the app creator account inside the form tenant. + """ + + _session_maker: sessionmaker[Session] + _workflow_run_repository: APIWorkflowRunRepository + + def __init__( + self, + session_factory: sessionmaker[Session] | Engine, + workflow_run_repository: APIWorkflowRunRepository | None = None, + ): + if isinstance(session_factory, Engine): + session_factory = sessionmaker(bind=session_factory) + self._session_maker = session_factory + self._workflow_run_repository = ( + workflow_run_repository or DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_factory) + ) + + def issue_upload_token(self, form_token: str) -> HumanInputUploadToken: + """Create an upload token for an active human input recipient token.""" + + with self._session_maker(expire_on_commit=False) as session, session.begin(): + recipient_model = session.scalar( + select(HumanInputFormRecipient) + .options(selectinload(HumanInputFormRecipient.form)) + .where(HumanInputFormRecipient.access_token == form_token) + .limit(1) + ) + if recipient_model is None or recipient_model.form is None: + raise FormNotFoundError() + + form = recipient_model.form + self._ensure_form_model_active(form) + upload_token = self._generate_unique_upload_token(session) + token_model = HumanInputFormUploadToken( + tenant_id=form.tenant_id, + app_id=form.app_id, + form_id=form.id, + recipient_id=recipient_model.id, + token=upload_token, + ) + session.add(token_model) + + return HumanInputUploadToken(upload_token=upload_token, expires_at=form.expiration_time) + + def validate_upload_token(self, upload_token: str) -> HumanInputUploadContext: + """Resolve 
an upload token and ensure the bound form is still active.""" + + query = ( + select(HumanInputFormUploadToken) + .options(selectinload(HumanInputFormUploadToken.form)) + .where(HumanInputFormUploadToken.token == upload_token) + .limit(1) + ) + with self._session_maker(expire_on_commit=False) as session: + token_model = session.scalars(query).first() + if token_model is None: + raise InvalidUploadTokenError() + + form_model = token_model.form + if form_model is None: + raise InvalidUploadTokenError() + self._ensure_form_model_active(form_model) + + owner = self._resolve_upload_owner(session=session, form_model=form_model) + + return HumanInputUploadContext( + tenant_id=token_model.tenant_id, + app_id=token_model.app_id, + form_id=token_model.form_id, + recipient_id=token_model.recipient_id, + upload_token_id=token_model.id, + owner=owner, + ) + + def record_upload_file(self, *, context: HumanInputUploadContext, file_id: str) -> None: + """Record that a file was uploaded through a specific form upload token.""" + + with self._session_maker(expire_on_commit=False) as session, session.begin(): + session.add( + HumanInputFormUploadFile( + tenant_id=context.tenant_id, + app_id=context.app_id, + form_id=context.form_id, + upload_file_id=file_id, + upload_token_id=context.upload_token_id, + ) + ) + + def _generate_unique_upload_token(self, session: Session) -> str: + return f"{HITL_UPLOAD_TOKEN_PREFIX}{secrets.token_urlsafe(_TOKEN_RANDOM_BYTES)}" + + def _resolve_upload_owner( + self, + *, + session: Session, + form_model: HumanInputForm, + ) -> Account | EndUser: + if form_model.workflow_run_id is None: + if form_model.form_kind == HumanInputFormKind.DELIVERY_TEST: + return self._resolve_delivery_test_upload_owner(session=session, form_model=form_model) + raise InvalidUploadTokenError() + + workflow_run = self._workflow_run_repository.get_workflow_run_by_id( + tenant_id=form_model.tenant_id, + app_id=form_model.app_id, + run_id=form_model.workflow_run_id, + ) + if 
workflow_run is None: + raise InvalidUploadTokenError() + + if workflow_run.created_by_role == CreatorUserRole.END_USER: + end_user = session.scalar( + select(EndUser) + .where( + EndUser.id == workflow_run.created_by, + EndUser.tenant_id == workflow_run.tenant_id, + EndUser.app_id == workflow_run.app_id, + ) + .limit(1) + ) + if end_user is None: + raise InvalidUploadTokenError() + return end_user + + if workflow_run.created_by_role != CreatorUserRole.ACCOUNT: + raise InvalidUploadTokenError() + + account = session.scalar(select(Account).where(Account.id == workflow_run.created_by).limit(1)) + if account is None: + raise InvalidUploadTokenError() + + tenant = session.scalar(select(Tenant).where(Tenant.id == workflow_run.tenant_id).limit(1)) + if tenant is None: + raise InvalidUploadTokenError() + + # HITL upload runs outside the normal account auth flow, so hydrate the + # account tenant context explicitly before delegating to FileService. + account.current_tenant = tenant + return account + + def _resolve_delivery_test_upload_owner( + self, + *, + session: Session, + form_model: HumanInputForm, + ) -> Account: + app = session.scalar( + select(App) + .where( + App.id == form_model.app_id, + App.tenant_id == form_model.tenant_id, + ) + .limit(1) + ) + if app is None or app.created_by is None: + raise InvalidUploadTokenError() + + account = session.scalar(select(Account).where(Account.id == app.created_by).limit(1)) + if account is None: + raise InvalidUploadTokenError() + + tenant = session.scalar(select(Tenant).where(Tenant.id == form_model.tenant_id).limit(1)) + if tenant is None: + raise InvalidUploadTokenError() + + account.current_tenant = tenant + if account.current_tenant_id != form_model.tenant_id: + raise InvalidUploadTokenError() + return account + + @staticmethod + def _ensure_form_model_active(form: HumanInputForm) -> None: + if form.submitted_at is not None or form.status == HumanInputFormStatus.SUBMITTED: + raise FormSubmittedError(form.id) + if 
form.status in {HumanInputFormStatus.TIMEOUT, HumanInputFormStatus.EXPIRED}: + raise FormExpiredError(form.id) + + now = naive_utc_now() + if ensure_naive_utc(form.expiration_time) <= now: + raise FormExpiredError(form.id) + + global_timeout_seconds = dify_config.HUMAN_INPUT_GLOBAL_TIMEOUT_SECONDS + if global_timeout_seconds <= 0 or form.workflow_run_id is None: + return + global_deadline = ensure_naive_utc(form.created_at) + timedelta(seconds=global_timeout_seconds) + if global_deadline <= now: + raise FormExpiredError(form.id) diff --git a/api/services/human_input_service.py b/api/services/human_input_service.py index 76598d31ac..61b6f720de 100644 --- a/api/services/human_input_service.py +++ b/api/services/human_input_service.py @@ -1,22 +1,33 @@ import logging -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from datetime import datetime, timedelta -from typing import Any +from typing import Any, Protocol, cast +from pydantic import JsonValue from sqlalchemy import Engine, select from sqlalchemy.orm import Session, sessionmaker from configs import dify_config +from core.app.file_access import DatabaseFileAccessController from core.repositories.human_input_repository import ( HumanInputFormRecord, HumanInputFormSubmissionRepository, ) +from factories.file_factory import build_from_mapping, build_from_mappings +from graphon.file import FileUploadConfig from graphon.nodes.human_input.entities import ( + FileInputConfig, + FileListInputConfig, FormDefinition, + FormInputConfig, HumanInputSubmissionValidationError, - validate_human_input_submission, + SelectInputConfig, + UserActionConfig, ) -from graphon.nodes.human_input.enums import HumanInputFormKind, HumanInputFormStatus +from graphon.nodes.human_input.entities import ( + validate_human_input_submission as graphon_validate_human_input_submission, +) +from graphon.nodes.human_input.enums import HumanInputFormKind, HumanInputFormStatus, ValueSourceType from libs.datetime_utils 
import ensure_naive_utc, naive_utc_now from libs.exception import BaseHTTPException from models.human_input import RecipientType @@ -24,6 +35,8 @@ from models.model import App, AppMode from repositories.factory import DifyAPIRepositoryFactory from tasks.app_generate.workflow_execute_task import resume_app_execution +_file_access_controller = DatabaseFileAccessController() + class Form: def __init__(self, record: HumanInputFormRecord): @@ -82,7 +95,7 @@ class HumanInputError(Exception): pass -class FormSubmittedError(HumanInputError, BaseHTTPException): +class FormSubmittedError(BaseHTTPException, HumanInputError): error_code = "human_input_form_submitted" description = "This form has already been submitted by another user, form_id={form_id}" code = 412 @@ -90,37 +103,48 @@ class FormSubmittedError(HumanInputError, BaseHTTPException): def __init__(self, form_id: str): template = self.description or "This form has already been submitted by another user, form_id={form_id}" description = template.format(form_id=form_id) - super().__init__(description=description) + BaseHTTPException.__init__(self, description=description) -class FormNotFoundError(HumanInputError, BaseHTTPException): +class FormNotFoundError(BaseHTTPException, HumanInputError): error_code = "human_input_form_not_found" code = 404 -class InvalidFormDataError(HumanInputError, BaseHTTPException): +class InvalidFormDataError(BaseHTTPException, HumanInputError): error_code = "invalid_form_data" code = 400 def __init__(self, description: str): - super().__init__(description=description) + BaseHTTPException.__init__(self, description=description) class WebAppDeliveryNotEnabledError(HumanInputError, BaseException): pass -class FormExpiredError(HumanInputError, BaseHTTPException): +class FormExpiredError(BaseHTTPException, HumanInputError): error_code = "human_input_form_expired" code = 412 def __init__(self, form_id: str): - super().__init__(description=f"This form has expired, form_id={form_id}") + 
BaseHTTPException.__init__( + self, + description=f"This form has expired, form_id={form_id}", + ) logger = logging.getLogger(__name__) +class FormDefinitionProtocol(Protocol): + @property + def inputs(self) -> Sequence[FormInputConfig]: ... + + @property + def user_actions(self) -> Sequence[UserActionConfig]: ... + + class HumanInputService: def __init__( self, @@ -157,7 +181,7 @@ class HumanInputService: recipient_type: RecipientType, form_token: str, selected_action_id: str, - form_data: Mapping[str, Any], + form_data: Mapping[str, JsonValue], submission_end_user_id: str | None = None, submission_user_id: str | None = None, ): @@ -166,13 +190,17 @@ class HumanInputService: raise WebAppDeliveryNotEnabledError() self.ensure_form_active(form) - self._validate_submission(form=form, selected_action_id=selected_action_id, form_data=form_data) + normalized_form_data = self._validate_submission( + form=form, + selected_action_id=selected_action_id, + form_data=form_data, + ) result = self._form_repository.mark_submitted( form_id=form.id, recipient_id=form.recipient_id, selected_action_id=selected_action_id, - form_data=form_data, + form_data=normalized_form_data, submission_user_id=submission_user_id, submission_end_user_id=submission_end_user_id, ) @@ -198,12 +226,17 @@ class HumanInputService: if form.submitted: raise FormSubmittedError(form.id) - def _validate_submission(self, form: Form, selected_action_id: str, form_data: Mapping[str, Any]) -> None: + def _validate_submission( + self, + form: Form, + selected_action_id: str, + form_data: Mapping[str, Any], + ) -> dict[str, JsonValue]: definition = form.get_definition() try: - validate_human_input_submission( - inputs=definition.inputs, - user_actions=definition.user_actions, + return self.validate_and_normalize_submission( + tenant_id=form.tenant_id, + form_definition=definition, selected_action_id=selected_action_id, form_data=form_data, ) @@ -247,3 +280,184 @@ class HumanInputService: created_at = 
ensure_naive_utc(form.created_at) global_deadline = created_at + timedelta(seconds=global_timeout_seconds) return global_deadline <= current + + @staticmethod + def validate_human_input_submission( + *, + form_definition: FormDefinitionProtocol, + selected_action_id: str, + form_data: Mapping[str, Any], + ) -> None: + graphon_validate_human_input_submission( + inputs=form_definition.inputs, + user_actions=form_definition.user_actions, + selected_action_id=selected_action_id, + form_data=form_data, + ) + + @classmethod + def validate_and_normalize_submission( + cls, + *, + tenant_id: str, + form_definition: FormDefinitionProtocol, + selected_action_id: str, + form_data: Mapping[str, Any], + ) -> dict[str, JsonValue]: + """ + Normalize Dify-owned runtime payloads before delegating shape validation to graphon. + + graphon owns the form schema and validation rules, while Dify owns tenant-aware file + reconstruction and persistence compatibility for submitted payloads. + """ + normalized_form_data = cls.normalize_submission_data( + tenant_id=tenant_id, + form_definition=form_definition, + form_data=form_data, + ) + graphon_validate_human_input_submission( + inputs=form_definition.inputs, + user_actions=form_definition.user_actions, + selected_action_id=selected_action_id, + form_data=normalized_form_data, + ) + return normalized_form_data + + @classmethod + def normalize_submission_data( + cls, + *, + tenant_id: str, + form_definition: FormDefinitionProtocol, + form_data: Mapping[str, Any], + ) -> dict[str, JsonValue]: + normalized_form_data: dict[str, JsonValue] = {key: cast(JsonValue, value) for key, value in form_data.items()} + inputs_by_name = {form_input.output_variable_name: form_input for form_input in form_definition.inputs} + for name, form_input in inputs_by_name.items(): + if name not in form_data: + continue + normalized_form_data[name] = cls._normalize_input_value( + tenant_id=tenant_id, + form_input=form_input, + value=form_data[name], + ) + + return 
normalized_form_data + + @classmethod + def _normalize_input_value( + cls, + *, + tenant_id: str, + form_input: FormInputConfig, + value: Any, + ) -> JsonValue: + if isinstance(form_input, SelectInputConfig): + return cls._normalize_select_value(form_input=form_input, value=value) + if isinstance(form_input, FileInputConfig): + return cls._normalize_file_value( + tenant_id=tenant_id, + form_input=form_input, + value=value, + ) + if isinstance(form_input, FileListInputConfig): + return cls._normalize_file_list_value( + tenant_id=tenant_id, + form_input=form_input, + value=value, + ) + return cast(JsonValue, value) + + @classmethod + def _normalize_select_value( + cls, + *, + form_input: SelectInputConfig, + value: Any, + ) -> JsonValue: + if not isinstance(value, str): + raise HumanInputSubmissionValidationError( + f"Invalid value for select input '{form_input.output_variable_name}': expected string" + ) + option_source = form_input.option_source + if option_source.type == ValueSourceType.CONSTANT and value not in option_source.value: + raise HumanInputSubmissionValidationError( + f"Invalid value for select input '{form_input.output_variable_name}': {value}" + ) + return value + + @classmethod + def _normalize_file_value( + cls, + *, + tenant_id: str, + form_input: FileInputConfig, + value: Any, + ) -> JsonValue: + if not isinstance(value, Mapping): + raise HumanInputSubmissionValidationError( + f"Invalid value for file input '{form_input.output_variable_name}': expected mapping" + ) + upload_config = cls._build_file_upload_config(form_input=form_input, number_limits=1) + try: + # `build_from_mapping` enforces tenant ownership for persisted upload references. 
+ file = build_from_mapping( + mapping=value, + tenant_id=tenant_id, + config=upload_config, + strict_type_validation=True, + access_controller=_file_access_controller, + ) + except ValueError as exc: + raise HumanInputSubmissionValidationError( + f"Invalid value for file input '{form_input.output_variable_name}': {exc}" + ) from exc + return cast(JsonValue, file.to_dict()) + + @classmethod + def _normalize_file_list_value( + cls, + *, + tenant_id: str, + form_input: FileListInputConfig, + value: Any, + ) -> JsonValue: + if not isinstance(value, list): + raise HumanInputSubmissionValidationError( + f"Invalid value for file list input '{form_input.output_variable_name}': expected list" + ) + if any(not isinstance(item, Mapping) for item in value): + raise HumanInputSubmissionValidationError( + f"Invalid value for file list input '{form_input.output_variable_name}': expected list of mappings" + ) + upload_config = cls._build_file_upload_config( + form_input=form_input, + number_limits=form_input.number_limits, + ) + try: + # `build_from_mappings` performs the same tenant-aware ownership validation in batch. 
+ files = build_from_mappings( + mappings=cast(Sequence[Mapping[str, Any]], value), + tenant_id=tenant_id, + config=upload_config, + strict_type_validation=True, + access_controller=_file_access_controller, + ) + except ValueError as exc: + raise HumanInputSubmissionValidationError( + f"Invalid value for file list input '{form_input.output_variable_name}': {exc}" + ) from exc + return cast(JsonValue, [file.to_dict() for file in files]) + + @staticmethod + def _build_file_upload_config( + *, + form_input: FileInputConfig | FileListInputConfig, + number_limits: int, + ) -> FileUploadConfig: + return FileUploadConfig( + allowed_file_types=list(form_input.allowed_file_types), + allowed_file_extensions=list(form_input.allowed_file_extensions), + allowed_file_upload_methods=list(form_input.allowed_file_upload_methods), + number_limits=number_limits, + ) diff --git a/api/services/recommend_app/category_order.py b/api/services/recommend_app/category_order.py new file mode 100644 index 0000000000..be6b112aa4 --- /dev/null +++ b/api/services/recommend_app/category_order.py @@ -0,0 +1,49 @@ +"""Apply Redis-backed category ordering for DB-backed Explore apps.""" + +import json +import logging +from collections.abc import Collection +from typing import Any + +from extensions.ext_redis import redis_client + +logger = logging.getLogger(__name__) + +EXPLORE_APP_CATEGORY_ORDER_KEY_PREFIX = "explore:apps:category_order" + + +def _category_order_key(language: str) -> str: + return f"{EXPLORE_APP_CATEGORY_ORDER_KEY_PREFIX}:{language}" + + +def get_explore_app_category_order(language: str) -> list[str]: + try: + raw_categories = redis_client.get(_category_order_key(language)) + except Exception: + logger.exception("Failed to read explore app category order from Redis.") + return [] + + if not raw_categories: + return [] + + if isinstance(raw_categories, bytes): + raw_categories = raw_categories.decode("utf-8") + + try: + categories: Any = json.loads(raw_categories) + except (TypeError, 
json.JSONDecodeError): + logger.warning("Invalid explore app category order payload for language %s.", language) + return [] + + if not isinstance(categories, list): + return [] + + return [category for category in categories if isinstance(category, str)] + + +def order_categories(categories: Collection[str], language: str) -> list[str]: + configured_order = get_explore_app_category_order(language) + if configured_order: + return configured_order + + return sorted(categories) diff --git a/api/services/recommend_app/database/database_retrieval.py b/api/services/recommend_app/database/database_retrieval.py index 1df5fd13b6..ac870f0700 100644 --- a/api/services/recommend_app/database/database_retrieval.py +++ b/api/services/recommend_app/database/database_retrieval.py @@ -6,6 +6,7 @@ from constants.languages import languages from extensions.ext_database import db from models.model import App, RecommendedApp from services.app_dsl_service import AppDslService +from services.recommend_app.category_order import order_categories from services.recommend_app.recommend_app_base import RecommendAppRetrievalBase from services.recommend_app.recommend_app_type import RecommendAppType @@ -18,7 +19,7 @@ class RecommendedAppItemDict(TypedDict): copyright: Any privacy_policy: Any custom_disclaimer: str - category: str + categories: list[str] position: int is_listed: bool @@ -80,6 +81,7 @@ class DatabaseRecommendAppRetrieval(RecommendAppRetrievalBase): if not site: continue + app_categories = recommended_app.categories or [] recommended_app_result: RecommendedAppItemDict = { "id": recommended_app.id, "app": recommended_app.app, @@ -88,15 +90,18 @@ class DatabaseRecommendAppRetrieval(RecommendAppRetrievalBase): "copyright": site.copyright, "privacy_policy": site.privacy_policy, "custom_disclaimer": site.custom_disclaimer, - "category": recommended_app.category, + "categories": app_categories, "position": recommended_app.position, "is_listed": recommended_app.is_listed, } 
recommended_apps_result.append(recommended_app_result) - categories.add(recommended_app.category) + categories.update(app_categories) - return RecommendedAppsResultDict(recommended_apps=recommended_apps_result, categories=sorted(categories)) + return RecommendedAppsResultDict( + recommended_apps=recommended_apps_result, + categories=order_categories(categories, language), + ) @classmethod def fetch_recommended_app_detail_from_db(cls, app_id: str) -> RecommendedAppDetailDict | None: diff --git a/api/services/tag_service.py b/api/services/tag_service.py index 1882c855ea..8043a99be1 100644 --- a/api/services/tag_service.py +++ b/api/services/tag_service.py @@ -1,9 +1,11 @@ import uuid +from typing import cast import sqlalchemy as sa from flask_login import current_user from pydantic import BaseModel, Field -from sqlalchemy import func, select +from sqlalchemy import delete, func, select +from sqlalchemy.engine import CursorResult from werkzeug.exceptions import NotFound from extensions.ext_database import db @@ -29,7 +31,7 @@ class TagBindingCreatePayload(BaseModel): class TagBindingDeletePayload(BaseModel): - tag_id: str + tag_ids: list[str] = Field(min_length=1) target_id: str type: TagType @@ -178,13 +180,18 @@ class TagService: @staticmethod def delete_tag_binding(payload: TagBindingDeletePayload): TagService.check_target_exists(payload.type, payload.target_id) - tag_binding = db.session.scalar( - select(TagBinding) - .where(TagBinding.target_id == payload.target_id, TagBinding.tag_id == payload.tag_id) - .limit(1) + result = cast( + CursorResult, + db.session.execute( + delete(TagBinding).where( + TagBinding.target_id == payload.target_id, + TagBinding.tag_id.in_(payload.tag_ids), + TagBinding.tenant_id == current_user.current_tenant_id, + ) + ), ) - if tag_binding: - db.session.delete(tag_binding) + + if result.rowcount: db.session.commit() @staticmethod diff --git a/api/services/tools/builtin_tools_manage_service.py 
b/api/services/tools/builtin_tools_manage_service.py index b8242ab3a5..20de1f4058 100644 --- a/api/services/tools/builtin_tools_manage_service.py +++ b/api/services/tools/builtin_tools_manage_service.py @@ -408,7 +408,7 @@ class BuiltinToolManageService: return {"result": "success"} @staticmethod - def set_default_provider(tenant_id: str, user_id: str, provider: str, id: str): + def set_default_provider(tenant_id: str, provider: str, id: str): """ set default provider """ @@ -422,12 +422,11 @@ class BuiltinToolManageService: if target_provider is None: raise ValueError("provider not found") - # clear default provider + # clear default provider (tenant-scoped: only one default per provider per workspace) session.execute( update(BuiltinToolProvider) .where( BuiltinToolProvider.tenant_id == tenant_id, - BuiltinToolProvider.user_id == user_id, BuiltinToolProvider.provider == provider, BuiltinToolProvider.is_default.is_(True), ) diff --git a/api/services/variable_truncator.py b/api/services/variable_truncator.py index 1529c2b98f..5dd5f6873f 100644 --- a/api/services/variable_truncator.py +++ b/api/services/variable_truncator.py @@ -194,14 +194,15 @@ class VariableTruncator(BaseTruncator): result: _PartResult[Any] # Apply type-specific truncation with target size - if isinstance(segment, ArraySegment): - result = self._truncate_array(segment.value, target_size) - elif isinstance(segment, StringSegment): - result = self._truncate_string(segment.value, target_size) - elif isinstance(segment, ObjectSegment): - result = self._truncate_object(segment.value, target_size) - else: - raise AssertionError("this should be unreachable.") + match segment: + case ArraySegment(): + result = self._truncate_array(segment.value, target_size) + case StringSegment(): + result = self._truncate_string(segment.value, target_size) + case ObjectSegment(): + result = self._truncate_object(segment.value, target_size) + case _: + raise AssertionError("this should be unreachable.") return 
_PartResult( value=segment.model_copy(update={"value": result.value}), @@ -219,40 +220,41 @@ class VariableTruncator(BaseTruncator): return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1) if depth > _MAX_DEPTH: raise MaxDepthExceededError() - if isinstance(value, str): - # Ideally, the size of strings should be calculated based on their utf-8 encoded length. - # However, this adds complexity as we would need to compute encoded sizes consistently - # throughout the code. Therefore, we approximate the size using the string's length. - # Rough estimate: number of characters, plus 2 for quotes - return len(value) + 2 - elif isinstance(value, (int, float)): - return len(str(value)) - elif isinstance(value, bool): - return 4 if value else 5 # "true" or "false" - elif value is None: - return 4 # "null" - elif isinstance(value, list): - # Size = sum of elements + separators + brackets - total = 2 # "[]" - for i, item in enumerate(value): - if i > 0: - total += 1 # "," - total += VariableTruncator.calculate_json_size(item, depth=depth + 1) - return total - elif isinstance(value, dict): - # Size = sum of keys + values + separators + brackets - total = 2 # "{}" - for index, key in enumerate(value.keys()): - if index > 0: - total += 1 # "," - total += VariableTruncator.calculate_json_size(str(key), depth=depth + 1) # Key as string - total += 1 # ":" - total += VariableTruncator.calculate_json_size(value[key], depth=depth + 1) - return total - elif isinstance(value, File): - return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1) - else: - raise UnknownTypeError(f"got unknown type {type(value)}") + match value: + case str(): + # Ideally, the size of strings should be calculated based on their utf-8 encoded length. + # However, this adds complexity as we would need to compute encoded sizes consistently + # throughout the code. Therefore, we approximate the size using the string's length. 
+ # Rough estimate: number of characters, plus 2 for quotes + return len(value) + 2 + case bool(): + return 4 if value else 5 # "true" or "false" + case int() | float(): + return len(str(value)) + case None: + return 4 # "null" + case list(): + # Size = sum of elements + separators + brackets + total = 2 # "[]" + for i, item in enumerate(value): + if i > 0: + total += 1 # "," + total += VariableTruncator.calculate_json_size(item, depth=depth + 1) + return total + case dict(): + # Size = sum of keys + values + separators + brackets + total = 2 # "{}" + for index, key in enumerate(value.keys()): + if index > 0: + total += 1 # "," + total += VariableTruncator.calculate_json_size(str(key), depth=depth + 1) # Key as string + total += 1 # ":" + total += VariableTruncator.calculate_json_size(value[key], depth=depth + 1) + return total + case File(): + return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1) + case _: + raise UnknownTypeError(f"got unknown type {type(value)}") def _truncate_string(self, value: str, target_size: int) -> _PartResult[str]: if (size := self.calculate_json_size(value)) < target_size: @@ -419,22 +421,23 @@ class VariableTruncator(BaseTruncator): target_size: int, ) -> _PartResult[Any]: """Truncate a value within an object to fit within budget.""" - if isinstance(val, UpdatedVariable): - # TODO(Workflow): push UpdatedVariable normalization closer to its producer. 
- return self._truncate_object(val.model_dump(), target_size) - elif isinstance(val, str): - return self._truncate_string(val, target_size) - elif isinstance(val, list): - return self._truncate_array(val, target_size) - elif isinstance(val, dict): - return self._truncate_object(val, target_size) - elif isinstance(val, File): - # File objects should not be truncated, return as-is - return _PartResult(val, self.calculate_json_size(val), False) - elif val is None or isinstance(val, (bool, int, float)): - return _PartResult(val, self.calculate_json_size(val), False) - else: - raise AssertionError("this statement should be unreachable.") + match val: + case UpdatedVariable(): + # TODO(Workflow): push UpdatedVariable normalization closer to its producer. + return self._truncate_object(val.model_dump(), target_size) + case str(): + return self._truncate_string(val, target_size) + case list(): + return self._truncate_array(val, target_size) + case dict(): + return self._truncate_object(val, target_size) + case File(): + # File objects should not be truncated, return as-is + return _PartResult(val, self.calculate_json_size(val), False) + case None | bool() | int() | float(): + return _PartResult(val, self.calculate_json_size(val), False) + case _: + raise AssertionError("this statement should be unreachable.") class DummyVariableTruncator(BaseTruncator): diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 5cf81915d6..68cf74ec9f 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -55,7 +55,7 @@ from graphon.node_events import NodeRunResult from graphon.nodes import BuiltinNodeTypes from graphon.nodes.base.node import Node from graphon.nodes.http_request import HTTP_REQUEST_CONFIG_FILTER_KEY, build_http_request_config -from graphon.nodes.human_input.entities import HumanInputNodeData, validate_human_input_submission +from graphon.nodes.human_input.entities import HumanInputNodeData from 
graphon.nodes.human_input.enums import HumanInputFormKind from graphon.nodes.human_input.human_input_node import HumanInputNode from graphon.nodes.start.entities import StartNodeData @@ -85,6 +85,7 @@ from services.errors.app import ( WorkflowHashNotEqualError, WorkflowNotFoundError, ) +from services.human_input_service import HumanInputService from services.workflow.workflow_converter import WorkflowConverter from .errors.workflow_service import DraftWorkflowDeletionError, WorkflowInUseError @@ -1292,18 +1293,22 @@ class WorkflowService: ) node_data = node.node_data - validate_human_input_submission( - inputs=node_data.inputs, - user_actions=node_data.user_actions, + human_input_service = HumanInputService(session_factory=sessionmaker(db.engine)) + normalized_form_inputs = human_input_service.validate_and_normalize_submission( + tenant_id=app_model.tenant_id, + form_definition=node_data, selected_action_id=action, form_data=form_inputs, ) rendered_content = node.render_form_content_before_submission() - outputs: dict[str, Any] = dict(form_inputs) + outputs: dict[str, Any] = dict(normalized_form_inputs) outputs["__action_id"] = action outputs["__rendered_content"] = node.render_form_content_with_outputs( - rendered_content, outputs, node_data.outputs_field_names() + rendered_content, + outputs, + node_data.outputs_field_names(), + node_data.inputs, ) enclosing_node_type_and_id = draft_workflow.get_enclosing_node_type_and_id( diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index 3b5e822b90..90131fe98d 100644 --- a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -13,7 +13,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from 
models.account import Account, TenantAccountJoin, TenantAccountRole -from models.enums import ConversationFromSource +from models.enums import AppStatus, ConversationFromSource from models.model import AppMode from services.app_generate_service import AppGenerateService @@ -28,7 +28,7 @@ class TestChatMessageApiPermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL return app @pytest.fixture @@ -78,7 +78,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -130,7 +130,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py index 309a0b015a..c4db0d5111 100644 --- a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py +++ b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py @@ -14,7 +14,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole -from models.enums import FeedbackFromSource, FeedbackRating +from models.enums import AppStatus, FeedbackFromSource, FeedbackRating from models.model import AppMode, MessageFeedback from services.feedback_service import FeedbackService @@ -29,7 +29,7 @@ class TestFeedbackExportApi: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.name = "Test App" return app @@ -135,7 +135,7 @@ class 
TestFeedbackExportApi: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -167,7 +167,13 @@ class TestFeedbackExportApi: mock_export_feedbacks.assert_called_once() def test_feedback_export_csv_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + mock_account, + sample_feedback_data, ): """Test feedback export in CSV format.""" @@ -202,7 +208,13 @@ class TestFeedbackExportApi: assert "text/csv" in response.content_type def test_feedback_export_json_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + mock_account, + sample_feedback_data, ): """Test feedback export in JSON format.""" @@ -246,7 +258,7 @@ class TestFeedbackExportApi: assert "application/json" in response.content_type def test_feedback_export_with_filters( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with various filters.""" @@ -287,7 +299,7 @@ class TestFeedbackExportApi: ) def test_feedback_export_invalid_date_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with invalid date format.""" @@ -312,7 +324,7 @@ class TestFeedbackExportApi: assert "Parameter validation error" in response_json["error"] def test_feedback_export_server_error( - self, test_client: FlaskClient, auth_header, monkeypatch, 
mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with server error.""" diff --git a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py index 04945e57a0..ab08c7a6d8 100644 --- a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py @@ -11,6 +11,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole +from models.enums import AppStatus from models.model import AppMode from services.app_model_config_service import AppModelConfigService @@ -25,7 +26,7 @@ class TestModelConfigResourcePermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.app_model_config_id = str(uuid.uuid4()) return app @@ -73,7 +74,7 @@ class TestModelConfigResourcePermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py index a876b0c4aa..7d0b575262 100644 --- a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py +++ b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py @@ -1,5 +1,7 @@ from collections.abc import Generator +from pytest_mock import MockerFixture + from core.datasource.datasource_manager import DatasourceManager from 
core.datasource.entities.datasource_entities import DatasourceMessage from graphon.node_events import StreamCompletedEvent @@ -19,7 +21,7 @@ def _gen_var_stream() -> Generator[DatasourceMessage, None, None]: ) -def test_stream_node_events_accumulates_variables(mocker): +def test_stream_node_events_accumulates_variables(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_var_stream()) events = list( DatasourceManager.stream_node_events( diff --git a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py index 2392084c36..2c1e667c58 100644 --- a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py +++ b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GP: call_depth = 0 -def test_node_integration_minimal_stream(mocker): +def test_node_integration_minimal_stream(mocker: MockerFixture): sys_d = { "sys": { "datasource_type": "online_document", diff --git a/api/tests/integration_tests/workflow/nodes/test_tool.py b/api/tests/integration_tests/workflow/nodes/test_tool.py index a8e9422c1e..493330e02b 100644 --- a/api/tests/integration_tests/workflow/nodes/test_tool.py +++ b/api/tests/integration_tests/workflow/nodes/test_tool.py @@ -2,6 +2,8 @@ import time import uuid from unittest.mock import MagicMock, patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.tools.utils.configuration import ToolParameterConfigurationManager from core.workflow.node_factory 
import DifyNodeFactory @@ -71,7 +73,7 @@ def init_tool_node(config: dict): return node -def test_tool_variable_invoke(monkeypatch): +def test_tool_variable_invoke(monkeypatch: pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", @@ -106,7 +108,7 @@ def test_tool_variable_invoke(monkeypatch): assert item.node_run_result.outputs.get("text") is not None -def test_tool_mixed_invoke(monkeypatch): +def test_tool_mixed_invoke(monkeypatch: pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", diff --git a/api/tests/test_containers_integration_tests/conftest.py b/api/tests/test_containers_integration_tests/conftest.py index 66a25e5daf..b4482674da 100644 --- a/api/tests/test_containers_integration_tests/conftest.py +++ b/api/tests/test_containers_integration_tests/conftest.py @@ -433,7 +433,7 @@ def flask_app_with_containers(set_up_containers_and_env) -> Flask: @pytest.fixture -def flask_req_ctx_with_containers(flask_app_with_containers) -> Generator[None, None, None]: +def flask_req_ctx_with_containers(flask_app_with_containers: Flask) -> Generator[None, None, None]: """ Request context fixture for containerized Flask application. @@ -454,7 +454,7 @@ def flask_req_ctx_with_containers(flask_app_with_containers) -> Generator[None, @pytest.fixture -def test_client_with_containers(flask_app_with_containers) -> Generator[FlaskClient, None, None]: +def test_client_with_containers(flask_app_with_containers: Flask) -> Generator[FlaskClient, None, None]: """ Test client fixture for containerized Flask application. @@ -475,7 +475,7 @@ def test_client_with_containers(flask_app_with_containers) -> Generator[FlaskCli @pytest.fixture -def db_session_with_containers(flask_app_with_containers) -> Generator[Session, None, None]: +def db_session_with_containers(flask_app_with_containers: Flask) -> Generator[Session, None, None]: """ Database session fixture for containerized testing. 
diff --git a/api/tests/test_containers_integration_tests/controllers/console/app/test_app_apis.py b/api/tests/test_containers_integration_tests/controllers/console/app/test_app_apis.py index 18755ef012..bb737754a1 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/app/test_app_apis.py +++ b/api/tests/test_containers_integration_tests/controllers/console/app/test_app_apis.py @@ -7,6 +7,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from pydantic import ValidationError from werkzeug.exceptions import BadRequest, NotFound @@ -69,7 +70,7 @@ def _unwrap(func): class TestCompletionEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_completion_create_payload(self): @@ -86,7 +87,7 @@ class TestCompletionEndpoints: ) assert payload.query == "hi" - def test_completion_api_success(self, app, monkeypatch): + def test_completion_api_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = completion_module.CompletionMessageApi() method = _unwrap(api.post) @@ -116,7 +117,7 @@ class TestCompletionEndpoints: assert resp == {"result": {"text": "ok"}} - def test_completion_api_conversation_not_exists(self, app, monkeypatch): + def test_completion_api_conversation_not_exists(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = completion_module.CompletionMessageApi() method = _unwrap(api.post) @@ -142,7 +143,7 @@ class TestCompletionEndpoints: with pytest.raises(NotFound): method(app_model=MagicMock(id="app-1")) - def test_completion_api_provider_not_initialized(self, app, monkeypatch): + def test_completion_api_provider_not_initialized(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = completion_module.CompletionMessageApi() method = _unwrap(api.post) @@ -166,7 +167,7 @@ class TestCompletionEndpoints: with 
pytest.raises(completion_module.ProviderNotInitializeError): method(app_model=MagicMock(id="app-1")) - def test_completion_api_quota_exceeded(self, app, monkeypatch): + def test_completion_api_quota_exceeded(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = completion_module.CompletionMessageApi() method = _unwrap(api.post) @@ -193,10 +194,10 @@ class TestCompletionEndpoints: class TestAppEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_app_put_should_preserve_icon_type_when_payload_omits_it(self, app, monkeypatch): + def test_app_put_should_preserve_icon_type_when_payload_omits_it(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = app_module.AppApi() method = _unwrap(api.put) payload = { @@ -234,7 +235,7 @@ class TestAppEndpoints: } ) - def test_app_icon_post_should_forward_icon_type(self, app, monkeypatch): + def test_app_icon_post_should_forward_icon_type(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = app_module.AppIconApi() method = _unwrap(api.post) payload = { @@ -266,7 +267,7 @@ class TestAppEndpoints: class TestOpsTraceEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_ops_trace_query_basic(self): @@ -277,7 +278,7 @@ class TestOpsTraceEndpoints: payload = TraceConfigPayload(tracing_provider="langfuse", tracing_config={"api_key": "k"}) assert payload.tracing_config["api_key"] == "k" - def test_trace_app_config_get_empty(self, app, monkeypatch): + def test_trace_app_config_get_empty(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = ops_trace_module.TraceAppConfigApi() method = _unwrap(api.get) @@ -292,7 +293,7 @@ class TestOpsTraceEndpoints: assert result == {"has_not_configured": True} - def test_trace_app_config_post_invalid(self, app, monkeypatch): + def test_trace_app_config_post_invalid(self, 
app: Flask, monkeypatch: pytest.MonkeyPatch): api = ops_trace_module.TraceAppConfigApi() method = _unwrap(api.post) @@ -309,7 +310,7 @@ class TestOpsTraceEndpoints: with pytest.raises(BadRequest): method(app_id="app-1") - def test_trace_app_config_delete_not_found(self, app, monkeypatch): + def test_trace_app_config_delete_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = ops_trace_module.TraceAppConfigApi() method = _unwrap(api.delete) @@ -326,7 +327,7 @@ class TestOpsTraceEndpoints: class TestSiteEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_site_response_structure(self): @@ -337,7 +338,7 @@ class TestSiteEndpoints: payload = AppSiteUpdatePayload(default_language="en-US") assert payload.default_language == "en-US" - def test_app_site_update_post(self, app, monkeypatch): + def test_app_site_update_post(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = site_module.AppSite() method = _unwrap(api.post) @@ -375,7 +376,7 @@ class TestSiteEndpoints: assert isinstance(result, dict) assert result["title"] == "My Site" - def test_app_site_access_token_reset(self, app, monkeypatch): + def test_app_site_access_token_reset(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = site_module.AppSiteAccessTokenReset() method = _unwrap(api.post) @@ -427,7 +428,7 @@ class TestWorkflowEndpoints: class TestWorkflowAppLogEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_workflow_app_log_query(self): @@ -438,7 +439,7 @@ class TestWorkflowAppLogEndpoints: query = WorkflowAppLogQuery(detail="true") assert query.detail is True - def test_workflow_app_log_api_get(self, app, monkeypatch): + def test_workflow_app_log_api_get(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = workflow_app_log_module.WorkflowAppLogApi() method = 
_unwrap(api.get) @@ -477,14 +478,14 @@ class TestWorkflowAppLogEndpoints: class TestWorkflowDraftVariableEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_workflow_variable_creation(self): payload = WorkflowDraftVariableUpdatePayload(name="var1", value="test") assert payload.name == "var1" - def test_workflow_variable_collection_get(self, app, monkeypatch): + def test_workflow_variable_collection_get(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = workflow_draft_variable_module.WorkflowVariableCollectionApi() method = _unwrap(api.get) @@ -529,7 +530,7 @@ class TestWorkflowDraftVariableEndpoints: class TestWorkflowStatisticEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_workflow_statistic_time_range(self): @@ -541,7 +542,7 @@ class TestWorkflowStatisticEndpoints: assert query.start is None assert query.end is None - def test_workflow_daily_runs_statistic(self, app, monkeypatch): + def test_workflow_daily_runs_statistic(self, app: Flask, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(workflow_statistic_module, "db", SimpleNamespace(engine=MagicMock())) monkeypatch.setattr( workflow_statistic_module.DifyAPIRepositoryFactory, @@ -567,7 +568,7 @@ class TestWorkflowStatisticEndpoints: assert response.get_json() == {"data": [{"date": "2024-01-01"}]} - def test_workflow_daily_terminals_statistic(self, app, monkeypatch): + def test_workflow_daily_terminals_statistic(self, app: Flask, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(workflow_statistic_module, "db", SimpleNamespace(engine=MagicMock())) monkeypatch.setattr( workflow_statistic_module.DifyAPIRepositoryFactory, @@ -598,7 +599,7 @@ class TestWorkflowStatisticEndpoints: class TestWorkflowTriggerEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + 
def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_webhook_trigger_payload(self): @@ -608,7 +609,7 @@ class TestWorkflowTriggerEndpoints: enable_payload = ParserEnable(trigger_id="trigger-1", enable_trigger=True) assert enable_payload.enable_trigger is True - def test_webhook_trigger_api_get(self, app, monkeypatch): + def test_webhook_trigger_api_get(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = workflow_trigger_module.WebhookTriggerApi() method = _unwrap(api.get) diff --git a/api/tests/test_containers_integration_tests/controllers/console/app/test_app_import_api.py b/api/tests/test_containers_integration_tests/controllers/console/app/test_app_import_api.py index 25d19cf35a..bcb6e41ef7 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/app/test_app_import_api.py +++ b/api/tests/test_containers_integration_tests/controllers/console/app/test_app_import_api.py @@ -6,6 +6,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from flask import Flask from controllers.console.app import app_import as app_import_module from services.app_dsl_service import ImportStatus @@ -36,10 +37,10 @@ def _install_features(monkeypatch: pytest.MonkeyPatch, enabled: bool) -> None: class TestAppImportApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_import_post_returns_failed_status(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_post_returns_failed_status(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -57,7 +58,7 @@ class TestAppImportApi: assert status == 400 assert response["status"] == ImportStatus.FAILED - def test_import_post_returns_pending_status(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_post_returns_pending_status(self, app: 
Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -75,7 +76,7 @@ class TestAppImportApi: assert status == 202 assert response["status"] == ImportStatus.PENDING - def test_import_post_updates_webapp_auth_when_enabled(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_post_updates_webapp_auth_when_enabled(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -96,7 +97,7 @@ class TestAppImportApi: assert status == 200 assert response["status"] == ImportStatus.COMPLETED - def test_import_post_commits_session_on_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_post_commits_session_on_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -121,7 +122,7 @@ class TestAppImportApi: assert status == 200 assert response["status"] == ImportStatus.COMPLETED - def test_import_post_rolls_back_session_on_failure(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_post_rolls_back_session_on_failure(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -149,10 +150,10 @@ class TestAppImportApi: class TestAppImportConfirmApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_import_confirm_returns_failed_status(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_confirm_returns_failed_status(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportConfirmApi() method = _unwrap(api.post) @@ -172,10 +173,10 @@ class TestAppImportConfirmApi: class TestAppImportCheckDependenciesApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, 
flask_app_with_containers: Flask): return flask_app_with_containers - def test_import_check_dependencies_returns_result(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_check_dependencies_returns_result(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportCheckDependenciesApi() method = _unwrap(api.get) diff --git a/api/tests/test_containers_integration_tests/controllers/console/auth/test_email_register.py b/api/tests/test_containers_integration_tests/controllers/console/auth/test_email_register.py index 320da85b60..1fcce9ca44 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/auth/test_email_register.py +++ b/api/tests/test_containers_integration_tests/controllers/console/auth/test_email_register.py @@ -6,6 +6,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console.auth.email_register import ( EmailRegisterCheckApi, @@ -16,7 +17,7 @@ from services.account_service import AccountService @pytest.fixture -def app(flask_app_with_containers): +def app(flask_app_with_containers: Flask): return flask_app_with_containers @@ -33,7 +34,7 @@ class TestEmailRegisterSendEmailApi: mock_is_freeze, mock_send_mail, mock_get_account, - app, + app: Flask, ): mock_send_mail.return_value = "token-123" mock_is_freeze.return_value = False @@ -75,7 +76,7 @@ class TestEmailRegisterCheckApi: mock_revoke, mock_generate_token, mock_reset_rate, - app, + app: Flask, ): mock_rate_limit_check.return_value = False mock_get_data.return_value = {"email": "User@Example.com", "code": "4321"} @@ -120,7 +121,7 @@ class TestEmailRegisterResetApi: mock_create_account, mock_login, mock_reset_login_rate, - app, + app: Flask, ): mock_get_data.return_value = {"phase": "register", "email": "Invitee@Example.com"} mock_create_account.return_value = MagicMock() diff --git 
a/api/tests/test_containers_integration_tests/controllers/console/auth/test_forgot_password.py b/api/tests/test_containers_integration_tests/controllers/console/auth/test_forgot_password.py index d2703ed5cc..014c1588fe 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/auth/test_forgot_password.py +++ b/api/tests/test_containers_integration_tests/controllers/console/auth/test_forgot_password.py @@ -6,6 +6,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console.auth.forgot_password import ( ForgotPasswordCheckApi, @@ -16,7 +17,7 @@ from services.account_service import AccountService @pytest.fixture -def app(flask_app_with_containers): +def app(flask_app_with_containers: Flask): return flask_app_with_containers @@ -31,7 +32,7 @@ class TestForgotPasswordSendEmailApi: mock_is_ip_limit, mock_send_email, mock_get_account, - app, + app: Flask, ): mock_account = MagicMock() mock_get_account.return_value = mock_account @@ -80,7 +81,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_generate_token, mock_reset_rate, - app, + app: Flask, ): mock_rate_limit_check.return_value = False mock_get_data.return_value = {"email": "Admin@Example.com", "code": "4321"} @@ -123,7 +124,7 @@ class TestForgotPasswordResetApi: mock_db, mock_get_account, mock_update_account, - app, + app: Flask, ): mock_get_reset_data.return_value = {"phase": "reset", "email": "User@Example.com"} mock_account = MagicMock() diff --git a/api/tests/test_containers_integration_tests/controllers/console/auth/test_oauth.py b/api/tests/test_containers_integration_tests/controllers/console/auth/test_oauth.py index 1eabb45422..55b6a919d8 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/auth/test_oauth.py +++ b/api/tests/test_containers_integration_tests/controllers/console/auth/test_oauth.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import 
MagicMock, patch import pytest +from flask import Flask from controllers.console.auth.oauth import ( OAuthCallback, @@ -21,7 +22,7 @@ from services.errors.account import AccountRegisterError class TestGetOAuthProviders: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.mark.parametrize( @@ -65,7 +66,7 @@ class TestOAuthLogin: return OAuthLogin() @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture @@ -89,7 +90,7 @@ class TestOAuthLogin: mock_redirect, mock_get_providers, resource, - app, + app: Flask, mock_oauth_provider, invite_token, expected_token, @@ -130,7 +131,7 @@ class TestOAuthCallback: return OAuthCallback() @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture @@ -164,7 +165,7 @@ class TestOAuthCallback: mock_get_providers, mock_config, resource, - app, + app: Flask, oauth_setup, ): mock_config.CONSOLE_WEB_URL = "http://localhost:3000" @@ -217,7 +218,7 @@ class TestOAuthCallback: mock_get_providers, mock_config, resource, - app, + app: Flask, oauth_setup, ): mock_config.CONSOLE_WEB_URL = "http://localhost:3000" @@ -261,7 +262,7 @@ class TestOAuthCallback: mock_tenant_service, mock_account_service, resource, - app, + app: Flask, oauth_setup, account_status, expected_redirect, @@ -300,7 +301,7 @@ class TestOAuthCallback: mock_get_providers, mock_config, resource, - app, + app: Flask, oauth_setup, ): mock_get_providers.return_value = {"github": oauth_setup["provider"]} @@ -336,7 +337,7 @@ class TestOAuthCallback: mock_get_providers, mock_config, resource, - app, + app: Flask, oauth_setup, ): """Defensive test for CLOSED account status handling in OAuth callback. 
@@ -394,7 +395,7 @@ class TestOAuthCallback: class TestAccountGeneration: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture @@ -465,7 +466,7 @@ class TestAccountGeneration: mock_register_service, mock_feature_service, mock_get_account, - app, + app: Flask, user_info, mock_account, allow_register, @@ -504,7 +505,7 @@ class TestAccountGeneration: mock_register_service, mock_feature_service, mock_get_account, - app, + app: Flask, ): user_info = OAuthUserInfo(id="123", name="Test User", email="Upper@Example.com") mock_feature_service.get_system_features.return_value.is_allow_register = True @@ -529,7 +530,7 @@ class TestAccountGeneration: mock_feature_service, mock_tenant_service, mock_get_account, - app, + app: Flask, user_info, mock_account, ): diff --git a/api/tests/test_containers_integration_tests/controllers/console/auth/test_password_reset.py b/api/tests/test_containers_integration_tests/controllers/console/auth/test_password_reset.py index 50249bcd74..d017e8f2bd 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/auth/test_password_reset.py +++ b/api/tests/test_containers_integration_tests/controllers/console/auth/test_password_reset.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console.auth.error import ( EmailCodeError, @@ -25,7 +26,7 @@ class TestForgotPasswordSendEmailApi: """Test cases for sending password reset emails.""" @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture @@ -46,7 +47,7 @@ class TestForgotPasswordSendEmailApi: mock_send_email, mock_get_account, mock_is_ip_limit, - app, + app: Flask, mock_account, ): # Arrange @@ -68,7 +69,7 @@ class TestForgotPasswordSendEmailApi: 
mock_send_email.assert_called_once() @patch("controllers.console.auth.forgot_password.AccountService.is_email_send_ip_limit") - def test_send_reset_email_ip_rate_limited(self, mock_is_ip_limit, app): + def test_send_reset_email_ip_rate_limited(self, mock_is_ip_limit, app: Flask): """ Test password reset email blocked by IP rate limit. @@ -104,7 +105,7 @@ class TestForgotPasswordSendEmailApi: mock_send_email, mock_get_account, mock_is_ip_limit, - app, + app: Flask, mock_account, language_input, expected_language, @@ -138,7 +139,7 @@ class TestForgotPasswordCheckApi: """Test cases for verifying password reset codes.""" @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") @@ -153,7 +154,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_get_data, mock_is_rate_limit, - app, + app: Flask, ): """ Test successful verification code validation. @@ -200,7 +201,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_get_data, mock_is_rate_limit, - app, + app: Flask, ): mock_is_rate_limit.return_value = False mock_get_data.return_value = {"email": "User@Example.com", "code": "999888"} @@ -221,7 +222,7 @@ class TestForgotPasswordCheckApi: mock_reset_rate_limit.assert_called_once_with("user@example.com") @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") - def test_verify_code_rate_limited(self, mock_is_rate_limit, app): + def test_verify_code_rate_limited(self, mock_is_rate_limit, app: Flask): """ Test code verification blocked by rate limit. 
@@ -244,7 +245,7 @@ class TestForgotPasswordCheckApi: @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_verify_code_invalid_token(self, mock_get_data, mock_is_rate_limit, app): + def test_verify_code_invalid_token(self, mock_get_data, mock_is_rate_limit, app: Flask): """ Test code verification with invalid token. @@ -267,7 +268,7 @@ class TestForgotPasswordCheckApi: @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_verify_code_email_mismatch(self, mock_get_data, mock_is_rate_limit, app): + def test_verify_code_email_mismatch(self, mock_get_data, mock_is_rate_limit, app: Flask): """ Test code verification with mismatched email. @@ -292,7 +293,7 @@ class TestForgotPasswordCheckApi: @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") @patch("controllers.console.auth.forgot_password.AccountService.add_forgot_password_error_rate_limit") - def test_verify_code_wrong_code(self, mock_add_rate_limit, mock_get_data, mock_is_rate_limit, app): + def test_verify_code_wrong_code(self, mock_add_rate_limit, mock_get_data, mock_is_rate_limit, app: Flask): """ Test code verification with incorrect code. 
@@ -321,7 +322,7 @@ class TestForgotPasswordResetApi: """Test cases for resetting password with verified token.""" @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture @@ -344,7 +345,7 @@ class TestForgotPasswordResetApi: mock_get_account, mock_revoke_token, mock_get_data, - app, + app: Flask, mock_account, ): """ @@ -375,7 +376,7 @@ class TestForgotPasswordResetApi: mock_revoke_token.assert_called_once_with("valid_token") @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_reset_password_mismatch(self, mock_get_data, app): + def test_reset_password_mismatch(self, mock_get_data, app: Flask): """ Test password reset with mismatched passwords. @@ -397,7 +398,7 @@ class TestForgotPasswordResetApi: api.post() @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_reset_password_invalid_token(self, mock_get_data, app): + def test_reset_password_invalid_token(self, mock_get_data, app: Flask): """ Test password reset with invalid token. @@ -418,7 +419,7 @@ class TestForgotPasswordResetApi: api.post() @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_reset_password_wrong_phase(self, mock_get_data, app): + def test_reset_password_wrong_phase(self, mock_get_data, app: Flask): """ Test password reset with token not in reset phase. 
@@ -442,7 +443,7 @@ class TestForgotPasswordResetApi: @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") @patch("controllers.console.auth.forgot_password.AccountService.revoke_reset_password_token") @patch("controllers.console.auth.forgot_password.AccountService.get_account_by_email_with_case_fallback") - def test_reset_password_account_not_found(self, mock_get_account, mock_revoke_token, mock_get_data, app): + def test_reset_password_account_not_found(self, mock_get_account, mock_revoke_token, mock_get_data, app: Flask): """ Test password reset for non-existent account. diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline.py index d5ae95dfb7..7aa4aff1cc 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline.py @@ -6,6 +6,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from controllers.console import console_ns @@ -26,10 +27,10 @@ def unwrap(func): class TestPipelineTemplateListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = PipelineTemplateListApi() method = unwrap(api.get) @@ -50,10 +51,10 @@ class TestPipelineTemplateListApi: class TestPipelineTemplateDetailApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = 
PipelineTemplateDetailApi() method = unwrap(api.get) @@ -74,7 +75,7 @@ class TestPipelineTemplateDetailApi: assert status == 200 assert response == template - def test_get_returns_404_when_template_not_found(self, app): + def test_get_returns_404_when_template_not_found(self, app: Flask): api = PipelineTemplateDetailApi() method = unwrap(api.get) @@ -93,7 +94,7 @@ class TestPipelineTemplateDetailApi: assert status == 404 assert "error" in response - def test_get_returns_404_for_customized_type_not_found(self, app): + def test_get_returns_404_for_customized_type_not_found(self, app: Flask): api = PipelineTemplateDetailApi() method = unwrap(api.get) @@ -115,10 +116,10 @@ class TestPipelineTemplateDetailApi: class TestCustomizedPipelineTemplateApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_patch_success(self, app): + def test_patch_success(self, app: Flask): api = CustomizedPipelineTemplateApi() method = unwrap(api.patch) @@ -140,7 +141,7 @@ class TestCustomizedPipelineTemplateApi: update_mock.assert_called_once() assert response == 200 - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = CustomizedPipelineTemplateApi() method = unwrap(api.delete) @@ -155,7 +156,7 @@ class TestCustomizedPipelineTemplateApi: delete_mock.assert_called_once_with("tpl-1") assert response == 200 - def test_post_success(self, app, db_session_with_containers: Session): + def test_post_success(self, app: Flask, db_session_with_containers: Session): api = CustomizedPipelineTemplateApi() method = unwrap(api.post) @@ -182,7 +183,7 @@ class TestCustomizedPipelineTemplateApi: assert status == 200 assert response == {"data": "yaml-data"} - def test_post_template_not_found(self, app): + def test_post_template_not_found(self, app: Flask): api = CustomizedPipelineTemplateApi() method = unwrap(api.post) @@ -193,10 +194,10 @@ class 
TestCustomizedPipelineTemplateApi: class TestPublishCustomizedPipelineTemplateApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = PublishCustomizedPipelineTemplateApi() method = unwrap(api.post) diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_datasets.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_datasets.py index 64e3de2ca3..7624c1150f 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_datasets.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_datasets.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden import services @@ -24,13 +25,13 @@ def unwrap(func): class TestCreateRagPipelineDatasetApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def _valid_payload(self): return {"yaml_content": "name: test"} - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = CreateRagPipelineDatasetApi() method = unwrap(api.post) @@ -58,7 +59,7 @@ class TestCreateRagPipelineDatasetApi: assert status == 201 assert response == import_info - def test_post_forbidden_non_editor(self, app): + def test_post_forbidden_non_editor(self, app: Flask): api = CreateRagPipelineDatasetApi() method = unwrap(api.post) @@ -76,7 +77,7 @@ class TestCreateRagPipelineDatasetApi: with pytest.raises(Forbidden): method(api) - def test_post_dataset_name_duplicate(self, app): + def test_post_dataset_name_duplicate(self, app: 
Flask): api = CreateRagPipelineDatasetApi() method = unwrap(api.post) @@ -101,7 +102,7 @@ class TestCreateRagPipelineDatasetApi: with pytest.raises(DatasetNameDuplicateError): method(api) - def test_post_invalid_payload(self, app): + def test_post_invalid_payload(self, app: Flask): api = CreateRagPipelineDatasetApi() method = unwrap(api.post) @@ -122,10 +123,10 @@ class TestCreateRagPipelineDatasetApi: class TestCreateEmptyRagPipelineDatasetApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = CreateEmptyRagPipelineDatasetApi() method = unwrap(api.post) @@ -152,7 +153,7 @@ class TestCreateEmptyRagPipelineDatasetApi: assert status == 201 assert response == {"id": "ds-1"} - def test_post_forbidden_non_editor(self, app): + def test_post_forbidden_non_editor(self, app: Flask): api = CreateEmptyRagPipelineDatasetApi() method = unwrap(api.post) diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_import.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_import.py index cb67892878..44eb5c336c 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_import.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_import.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console import console_ns from controllers.console.datasets.rag_pipeline.rag_pipeline_import import ( @@ -25,7 +26,7 @@ def unwrap(func): class TestRagPipelineImportApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return 
flask_app_with_containers def _payload(self, mode="create"): @@ -35,7 +36,7 @@ class TestRagPipelineImportApi: "name": "Test", } - def test_post_success_200(self, app): + def test_post_success_200(self, app: Flask): api = RagPipelineImportApi() method = unwrap(api.post) @@ -65,7 +66,7 @@ class TestRagPipelineImportApi: assert status == 200 assert response == {"status": "success"} - def test_post_failed_400(self, app): + def test_post_failed_400(self, app: Flask): api = RagPipelineImportApi() method = unwrap(api.post) @@ -95,7 +96,7 @@ class TestRagPipelineImportApi: assert status == 400 assert response == {"status": "failed"} - def test_post_pending_202(self, app): + def test_post_pending_202(self, app: Flask): api = RagPipelineImportApi() method = unwrap(api.post) @@ -128,10 +129,10 @@ class TestRagPipelineImportApi: class TestRagPipelineImportConfirmApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_confirm_success(self, app): + def test_confirm_success(self, app: Flask): api = RagPipelineImportConfirmApi() method = unwrap(api.post) @@ -159,7 +160,7 @@ class TestRagPipelineImportConfirmApi: assert status == 200 assert response == {"ok": True} - def test_confirm_failed(self, app): + def test_confirm_failed(self, app: Flask): api = RagPipelineImportConfirmApi() method = unwrap(api.post) @@ -190,10 +191,10 @@ class TestRagPipelineImportConfirmApi: class TestRagPipelineImportCheckDependenciesApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = RagPipelineImportCheckDependenciesApi() method = unwrap(api.get) @@ -219,10 +220,10 @@ class TestRagPipelineImportCheckDependenciesApi: class TestRagPipelineExportApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, 
flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_with_include_secret(self, app): + def test_get_with_include_secret(self, app: Flask): api = RagPipelineExportApi() method = unwrap(api.get) diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_workflow.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_workflow.py index c1f3122c2b..c17a83cad3 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_workflow.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_workflow.py @@ -7,6 +7,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from werkzeug.exceptions import BadRequest, Forbidden, HTTPException, NotFound @@ -45,10 +46,10 @@ def unwrap(func): class TestDraftWorkflowApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_draft_success(self, app): + def test_get_draft_success(self, app: Flask): api = DraftRagPipelineApi() method = unwrap(api.get) @@ -68,7 +69,7 @@ class TestDraftWorkflowApi: result = method(api, pipeline) assert result == workflow - def test_get_draft_not_exist(self, app): + def test_get_draft_not_exist(self, app: Flask): api = DraftRagPipelineApi() method = unwrap(api.get) @@ -86,7 +87,7 @@ class TestDraftWorkflowApi: with pytest.raises(DraftWorkflowNotExist): method(api, pipeline) - def test_sync_hash_not_match(self, app): + def test_sync_hash_not_match(self, app: Flask): api = DraftRagPipelineApi() method = unwrap(api.post) @@ -111,7 +112,7 @@ class TestDraftWorkflowApi: with pytest.raises(DraftWorkflowNotSync): method(api, pipeline) - def test_sync_invalid_text_plain(self, 
app): + def test_sync_invalid_text_plain(self, app: Flask): api = DraftRagPipelineApi() method = unwrap(api.post) @@ -128,7 +129,7 @@ class TestDraftWorkflowApi: response, status = method(api, pipeline) assert status == 400 - def test_restore_published_workflow_to_draft_success(self, app): + def test_restore_published_workflow_to_draft_success(self, app: Flask): api = RagPipelineDraftWorkflowRestoreApi() method = unwrap(api.post) @@ -155,7 +156,7 @@ class TestDraftWorkflowApi: assert result["result"] == "success" assert result["hash"] == "restored-hash" - def test_restore_published_workflow_to_draft_not_found(self, app): + def test_restore_published_workflow_to_draft_not_found(self, app: Flask): api = RagPipelineDraftWorkflowRestoreApi() method = unwrap(api.post) @@ -179,7 +180,7 @@ class TestDraftWorkflowApi: with pytest.raises(NotFound): method(api, pipeline, "published-workflow") - def test_restore_published_workflow_to_draft_returns_400_for_draft_source(self, app): + def test_restore_published_workflow_to_draft_returns_400_for_draft_source(self, app: Flask): api = RagPipelineDraftWorkflowRestoreApi() method = unwrap(api.post) @@ -211,10 +212,10 @@ class TestDraftWorkflowApi: class TestDraftRunNodes: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_iteration_node_success(self, app): + def test_iteration_node_success(self, app: Flask): api = RagPipelineDraftRunIterationNodeApi() method = unwrap(api.post) @@ -240,7 +241,7 @@ class TestDraftRunNodes: result = method(api, pipeline, "node") assert result == {"ok": True} - def test_iteration_node_conversation_not_exists(self, app): + def test_iteration_node_conversation_not_exists(self, app: Flask): api = RagPipelineDraftRunIterationNodeApi() method = unwrap(api.post) @@ -262,7 +263,7 @@ class TestDraftRunNodes: with pytest.raises(NotFound): method(api, pipeline, "node") - def test_loop_node_success(self, app): + 
def test_loop_node_success(self, app: Flask): api = RagPipelineDraftRunLoopNodeApi() method = unwrap(api.post) @@ -290,10 +291,10 @@ class TestDraftRunNodes: class TestPipelineRunApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_draft_run_success(self, app): + def test_draft_run_success(self, app: Flask): api = DraftRagPipelineRunApi() method = unwrap(api.post) @@ -325,7 +326,7 @@ class TestPipelineRunApis: ): assert method(api, pipeline) == {"ok": True} - def test_draft_run_rate_limit(self, app): + def test_draft_run_rate_limit(self, app: Flask): api = DraftRagPipelineRunApi() method = unwrap(api.post) @@ -356,10 +357,10 @@ class TestPipelineRunApis: class TestDraftNodeRun: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_execution_not_found(self, app): + def test_execution_not_found(self, app: Flask): api = RagPipelineDraftNodeRunApi() method = unwrap(api.post) @@ -387,10 +388,10 @@ class TestDraftNodeRun: class TestPublishedPipelineApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_publish_success(self, app, db_session_with_containers: Session): + def test_publish_success(self, app: Flask, db_session_with_containers: Session): from models.dataset import Pipeline api = PublishedRagPipelineApi() @@ -436,10 +437,10 @@ class TestPublishedPipelineApis: class TestMiscApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_task_stop(self, app): + def test_task_stop(self, app: Flask): api = RagPipelineTaskStopApi() method = unwrap(api.post) @@ -460,7 +461,7 @@ class TestMiscApis: stop_mock.assert_called_once() assert result["result"] == "success" 
- def test_transform_forbidden(self, app): + def test_transform_forbidden(self, app: Flask): api = RagPipelineTransformApi() method = unwrap(api.post) @@ -476,7 +477,7 @@ class TestMiscApis: with pytest.raises(Forbidden): method(api, "ds1") - def test_recommended_plugins(self, app): + def test_recommended_plugins(self, app: Flask): api = RagPipelineRecommendedPluginApi() method = unwrap(api.get) @@ -496,10 +497,10 @@ class TestMiscApis: class TestPublishedRagPipelineRunApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_published_run_success(self, app): + def test_published_run_success(self, app: Flask): api = PublishedRagPipelineRunApi() method = unwrap(api.post) @@ -533,7 +534,7 @@ class TestPublishedRagPipelineRunApi: result = method(api, pipeline) assert result == {"ok": True} - def test_published_run_rate_limit(self, app): + def test_published_run_rate_limit(self, app: Flask): api = PublishedRagPipelineRunApi() method = unwrap(api.post) @@ -565,10 +566,10 @@ class TestPublishedRagPipelineRunApi: class TestDefaultBlockConfigApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_block_config_success(self, app): + def test_get_block_config_success(self, app: Flask): api = DefaultRagPipelineBlockConfigApi() method = unwrap(api.get) @@ -587,7 +588,7 @@ class TestDefaultBlockConfigApi: result = method(api, pipeline, "llm") assert result == {"k": "v"} - def test_get_block_config_invalid_json(self, app): + def test_get_block_config_invalid_json(self, app: Flask): api = DefaultRagPipelineBlockConfigApi() method = unwrap(api.get) @@ -600,10 +601,10 @@ class TestDefaultBlockConfigApi: class TestPublishedAllRagPipelineApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return 
flask_app_with_containers - def test_get_published_workflows_success(self, app): + def test_get_published_workflows_success(self, app: Flask): api = PublishedAllRagPipelineApi() method = unwrap(api.get) @@ -629,7 +630,7 @@ class TestPublishedAllRagPipelineApi: assert result["items"] == [{"id": "w1"}] assert result["has_more"] is False - def test_get_published_workflows_forbidden(self, app): + def test_get_published_workflows_forbidden(self, app: Flask): api = PublishedAllRagPipelineApi() method = unwrap(api.get) @@ -649,10 +650,10 @@ class TestPublishedAllRagPipelineApi: class TestRagPipelineByIdApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_patch_success(self, app): + def test_patch_success(self, app: Flask): api = RagPipelineByIdApi() method = unwrap(api.patch) @@ -682,7 +683,7 @@ class TestRagPipelineByIdApi: assert result == workflow - def test_patch_no_fields(self, app): + def test_patch_no_fields(self, app: Flask): api = RagPipelineByIdApi() method = unwrap(api.patch) @@ -700,7 +701,7 @@ class TestRagPipelineByIdApi: result, status = method(api, pipeline, "w1") assert status == 400 - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = RagPipelineByIdApi() method = unwrap(api.delete) @@ -720,7 +721,7 @@ class TestRagPipelineByIdApi: workflow_service.delete_workflow.assert_called_once() assert result == (None, 204) - def test_delete_active_workflow_rejected(self, app): + def test_delete_active_workflow_rejected(self, app: Flask): api = RagPipelineByIdApi() method = unwrap(api.delete) @@ -733,10 +734,10 @@ class TestRagPipelineByIdApi: class TestRagPipelineWorkflowLastRunApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_last_run_success(self, app): + def test_last_run_success(self, app: Flask): api = 
RagPipelineWorkflowLastRunApi() method = unwrap(api.get) @@ -758,7 +759,7 @@ class TestRagPipelineWorkflowLastRunApi: result = method(api, pipeline, "node1") assert result == node_exec - def test_last_run_not_found(self, app): + def test_last_run_not_found(self, app: Flask): api = RagPipelineWorkflowLastRunApi() method = unwrap(api.get) @@ -780,10 +781,10 @@ class TestRagPipelineWorkflowLastRunApi: class TestRagPipelineDatasourceVariableApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_set_datasource_variables_success(self, app): + def test_set_datasource_variables_success(self, app: Flask): api = RagPipelineDatasourceVariableApi() method = unwrap(api.post) diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/test_data_source.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/test_data_source.py index 1c4c6a899f..b59009f7c4 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/test_data_source.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/test_data_source.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, PropertyMock, patch import pytest +from flask import Flask from werkzeug.exceptions import NotFound from controllers.console.datasets import data_source @@ -51,10 +52,10 @@ def mock_engine(): class TestDataSourceApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app, patch_tenant): + def test_get_success(self, app: Flask, patch_tenant): api = DataSourceApi() method = unwrap(api.get) @@ -78,7 +79,7 @@ class TestDataSourceApi: assert status == 200 assert response["data"][0]["is_bound"] is True - def test_get_no_bindings(self, app, patch_tenant): + def 
test_get_no_bindings(self, app: Flask, patch_tenant): api = DataSourceApi() method = unwrap(api.get) @@ -94,7 +95,7 @@ class TestDataSourceApi: assert status == 200 assert response["data"] == [] - def test_patch_enable_binding(self, app, patch_tenant, mock_engine): + def test_patch_enable_binding(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -115,7 +116,7 @@ class TestDataSourceApi: assert status == 200 assert binding.disabled is False - def test_patch_disable_binding(self, app, patch_tenant, mock_engine): + def test_patch_disable_binding(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -136,7 +137,7 @@ class TestDataSourceApi: assert status == 200 assert binding.disabled is True - def test_patch_binding_not_found(self, app, patch_tenant, mock_engine): + def test_patch_binding_not_found(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -151,7 +152,7 @@ class TestDataSourceApi: with pytest.raises(NotFound): method(api, "b1", "enable") - def test_patch_enable_already_enabled(self, app, patch_tenant, mock_engine): + def test_patch_enable_already_enabled(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -168,7 +169,7 @@ class TestDataSourceApi: with pytest.raises(ValueError): method(api, "b1", "enable") - def test_patch_disable_already_disabled(self, app, patch_tenant, mock_engine): + def test_patch_disable_already_disabled(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -188,10 +189,10 @@ class TestDataSourceApi: class TestDataSourceNotionListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_credential_not_found(self, app, patch_tenant): + def test_get_credential_not_found(self, app: Flask, patch_tenant): api = 
DataSourceNotionListApi() method = unwrap(api.get) @@ -205,7 +206,7 @@ class TestDataSourceNotionListApi: with pytest.raises(NotFound): method(api) - def test_get_success_no_dataset_id(self, app, patch_tenant, mock_engine): + def test_get_success_no_dataset_id(self, app: Flask, patch_tenant, mock_engine): api = DataSourceNotionListApi() method = unwrap(api.get) @@ -246,7 +247,7 @@ class TestDataSourceNotionListApi: assert status == 200 - def test_get_success_with_dataset_id(self, app, patch_tenant, mock_engine): + def test_get_success_with_dataset_id(self, app: Flask, patch_tenant, mock_engine): api = DataSourceNotionListApi() method = unwrap(api.get) @@ -299,7 +300,7 @@ class TestDataSourceNotionListApi: assert status == 200 - def test_get_invalid_dataset_type(self, app, patch_tenant, mock_engine): + def test_get_invalid_dataset_type(self, app: Flask, patch_tenant, mock_engine): api = DataSourceNotionListApi() method = unwrap(api.get) @@ -323,10 +324,10 @@ class TestDataSourceNotionListApi: class TestDataSourceNotionApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_preview_success(self, app, patch_tenant): + def test_get_preview_success(self, app: Flask, patch_tenant): api = DataSourceNotionApi() method = unwrap(api.get) @@ -347,7 +348,7 @@ class TestDataSourceNotionApi: assert status == 200 - def test_post_indexing_estimate_success(self, app, patch_tenant): + def test_post_indexing_estimate_success(self, app: Flask, patch_tenant): api = DataSourceNotionApi() method = unwrap(api.post) @@ -381,10 +382,10 @@ class TestDataSourceNotionApi: class TestDataSourceNotionDatasetSyncApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app, patch_tenant): + def test_get_success(self, app: Flask, patch_tenant): api = 
DataSourceNotionDatasetSyncApi() method = unwrap(api.get) @@ -407,7 +408,7 @@ class TestDataSourceNotionDatasetSyncApi: assert status == 200 - def test_get_dataset_not_found(self, app, patch_tenant): + def test_get_dataset_not_found(self, app: Flask, patch_tenant): api = DataSourceNotionDatasetSyncApi() method = unwrap(api.get) @@ -424,10 +425,10 @@ class TestDataSourceNotionDatasetSyncApi: class TestDataSourceNotionDocumentSyncApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app, patch_tenant): + def test_get_success(self, app: Flask, patch_tenant): api = DataSourceNotionDocumentSyncApi() method = unwrap(api.get) @@ -450,7 +451,7 @@ class TestDataSourceNotionDocumentSyncApi: assert status == 200 - def test_get_document_not_found(self, app, patch_tenant): + def test_get_document_not_found(self, app: Flask, patch_tenant): api = DataSourceNotionDocumentSyncApi() method = unwrap(api.get) diff --git a/api/tests/test_containers_integration_tests/controllers/console/explore/test_conversation.py b/api/tests/test_containers_integration_tests/controllers/console/explore/test_conversation.py index 83492048ef..917aa35fe6 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/explore/test_conversation.py +++ b/api/tests/test_containers_integration_tests/controllers/console/explore/test_conversation.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import NotFound import controllers.console.explore.conversation as conversation_module @@ -53,10 +54,10 @@ def user(): class TestConversationListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app, chat_app, user): + def test_get_success(self, app: 
Flask, chat_app, user): api = conversation_module.ConversationListApi() method = unwrap(api.get) @@ -81,7 +82,7 @@ class TestConversationListApi: assert result["has_more"] is False assert len(result["data"]) == 2 - def test_last_conversation_not_exists(self, app, chat_app, user): + def test_last_conversation_not_exists(self, app: Flask, chat_app, user): api = conversation_module.ConversationListApi() method = unwrap(api.get) @@ -97,7 +98,7 @@ class TestConversationListApi: with pytest.raises(NotFound): method(chat_app) - def test_wrong_app_mode(self, app, non_chat_app): + def test_wrong_app_mode(self, app: Flask, non_chat_app): api = conversation_module.ConversationListApi() method = unwrap(api.get) @@ -108,10 +109,10 @@ class TestConversationListApi: class TestConversationApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_delete_success(self, app, chat_app, user): + def test_delete_success(self, app: Flask, chat_app, user): api = conversation_module.ConversationApi() method = unwrap(api.delete) @@ -129,7 +130,7 @@ class TestConversationApi: assert status == 204 assert body["result"] == "success" - def test_delete_not_found(self, app, chat_app, user): + def test_delete_not_found(self, app: Flask, chat_app, user): api = conversation_module.ConversationApi() method = unwrap(api.delete) @@ -145,7 +146,7 @@ class TestConversationApi: with pytest.raises(NotFound): method(chat_app, "cid") - def test_delete_wrong_app_mode(self, app, non_chat_app): + def test_delete_wrong_app_mode(self, app: Flask, non_chat_app): api = conversation_module.ConversationApi() method = unwrap(api.delete) @@ -156,10 +157,10 @@ class TestConversationApi: class TestConversationRenameApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_rename_success(self, app, chat_app, user): + def 
test_rename_success(self, app: Flask, chat_app, user): api = conversation_module.ConversationRenameApi() method = unwrap(api.post) @@ -178,7 +179,7 @@ class TestConversationRenameApi: assert result["id"] == "cid" - def test_rename_not_found(self, app, chat_app, user): + def test_rename_not_found(self, app: Flask, chat_app, user): api = conversation_module.ConversationRenameApi() method = unwrap(api.post) @@ -197,10 +198,10 @@ class TestConversationRenameApi: class TestConversationPinApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_pin_success(self, app, chat_app, user): + def test_pin_success(self, app: Flask, chat_app, user): api = conversation_module.ConversationPinApi() method = unwrap(api.patch) @@ -219,10 +220,10 @@ class TestConversationPinApi: class TestConversationUnPinApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_unpin_success(self, app, chat_app, user): + def test_unpin_success(self, app: Flask, chat_app, user): api = conversation_module.ConversationUnPinApi() method = unwrap(api.patch) diff --git a/api/tests/test_containers_integration_tests/controllers/console/workspace/test_tool_provider.py b/api/tests/test_containers_integration_tests/controllers/console/workspace/test_tool_provider.py index f2e7104b18..d944613886 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/workspace/test_tool_provider.py +++ b/api/tests/test_containers_integration_tests/controllers/console/workspace/test_tool_provider.py @@ -6,6 +6,7 @@ import json from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden from controllers.console.workspace.tool_providers import ( @@ -60,7 +61,7 @@ def _mock_user_tenant(): @pytest.fixture -def client(flask_app_with_containers): +def 
client(flask_app_with_containers: Flask): return flask_app_with_containers.test_client() @@ -147,10 +148,10 @@ class TestUtils: class TestToolProviderListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = ToolProviderListApi() method = unwrap(api.get) @@ -170,10 +171,10 @@ class TestToolProviderListApi: class TestBuiltinProviderApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_list_tools(self, app): + def test_list_tools(self, app: Flask): api = ToolBuiltinProviderListToolsApi() method = unwrap(api.get) @@ -190,7 +191,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider") == [{"a": 1}] - def test_info(self, app): + def test_info(self, app: Flask): api = ToolBuiltinProviderInfoApi() method = unwrap(api.get) @@ -207,7 +208,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider") == {"x": 1} - def test_delete(self, app): + def test_delete(self, app: Flask): api = ToolBuiltinProviderDeleteApi() method = unwrap(api.post) @@ -224,7 +225,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider")["result"] == "success" - def test_add_invalid_type(self, app): + def test_add_invalid_type(self, app: Flask): api = ToolBuiltinProviderAddApi() method = unwrap(api.post) @@ -238,7 +239,7 @@ class TestBuiltinProviderApis: with pytest.raises(ValueError): method(api, "provider") - def test_add_success(self, app): + def test_add_success(self, app: Flask): api = ToolBuiltinProviderAddApi() method = unwrap(api.post) @@ -257,7 +258,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider")["id"] == 1 - def test_update(self, app): + def test_update(self, app: Flask): api = ToolBuiltinProviderUpdateApi() method = unwrap(api.post) @@ -276,7 +277,7 @@ 
class TestBuiltinProviderApis: ): assert method(api, "provider")["ok"] - def test_get_credentials(self, app): + def test_get_credentials(self, app: Flask): api = ToolBuiltinProviderGetCredentialsApi() method = unwrap(api.get) @@ -293,7 +294,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider") == {"k": "v"} - def test_icon(self, app): + def test_icon(self, app: Flask): api = ToolBuiltinProviderIconApi() method = unwrap(api.get) @@ -307,7 +308,7 @@ class TestBuiltinProviderApis: response = method(api, "provider") assert response.mimetype == "image/png" - def test_credentials_schema(self, app): + def test_credentials_schema(self, app: Flask): api = ToolBuiltinProviderCredentialsSchemaApi() method = unwrap(api.get) @@ -324,7 +325,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider", "oauth2") == {"schema": {}} - def test_set_default_credential(self, app): + def test_set_default_credential(self, app: Flask): api = ToolBuiltinProviderSetDefaultApi() method = unwrap(api.post) @@ -341,7 +342,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider")["ok"] - def test_get_credential_info(self, app): + def test_get_credential_info(self, app: Flask): api = ToolBuiltinProviderGetCredentialInfoApi() method = unwrap(api.get) @@ -358,7 +359,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider") == {"info": "x"} - def test_get_oauth_client_schema(self, app): + def test_get_oauth_client_schema(self, app: Flask): api = ToolBuiltinProviderGetOauthClientSchemaApi() method = unwrap(api.get) @@ -378,10 +379,10 @@ class TestBuiltinProviderApis: class TestApiProviderApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_add(self, app): + def test_add(self, app: Flask): api = ToolApiProviderAddApi() method = unwrap(api.post) @@ -406,7 +407,7 @@ class TestApiProviderApis: ): assert method(api)["id"] == 1 - def 
test_remote_schema(self, app): + def test_remote_schema(self, app: Flask): api = ToolApiProviderGetRemoteSchemaApi() method = unwrap(api.get) @@ -423,7 +424,7 @@ class TestApiProviderApis: ): assert method(api)["schema"] == "x" - def test_list_tools(self, app): + def test_list_tools(self, app: Flask): api = ToolApiProviderListToolsApi() method = unwrap(api.get) @@ -440,7 +441,7 @@ class TestApiProviderApis: ): assert method(api) == [{"tool": 1}] - def test_update(self, app): + def test_update(self, app: Flask): api = ToolApiProviderUpdateApi() method = unwrap(api.post) @@ -468,7 +469,7 @@ class TestApiProviderApis: ): assert method(api)["ok"] - def test_delete(self, app): + def test_delete(self, app: Flask): api = ToolApiProviderDeleteApi() method = unwrap(api.post) @@ -485,7 +486,7 @@ class TestApiProviderApis: ): assert method(api)["result"] == "success" - def test_get(self, app): + def test_get(self, app: Flask): api = ToolApiProviderGetApi() method = unwrap(api.get) @@ -505,10 +506,10 @@ class TestApiProviderApis: class TestWorkflowApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_create(self, app): + def test_create(self, app: Flask): api = ToolWorkflowProviderCreateApi() method = unwrap(api.post) @@ -534,7 +535,7 @@ class TestWorkflowApis: ): assert method(api)["id"] == 1 - def test_update_invalid(self, app): + def test_update_invalid(self, app: Flask): api = ToolWorkflowProviderUpdateApi() method = unwrap(api.post) @@ -560,7 +561,7 @@ class TestWorkflowApis: result = method(api) assert result["ok"] - def test_delete(self, app): + def test_delete(self, app: Flask): api = ToolWorkflowProviderDeleteApi() method = unwrap(api.post) @@ -577,7 +578,7 @@ class TestWorkflowApis: ): assert method(api)["ok"] - def test_get_error(self, app): + def test_get_error(self, app: Flask): api = ToolWorkflowProviderGetApi() method = unwrap(api.get) @@ -594,10 +595,10 @@ 
class TestWorkflowApis: class TestLists: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_builtin_list(self, app): + def test_builtin_list(self, app: Flask): api = ToolBuiltinListApi() method = unwrap(api.get) @@ -617,7 +618,7 @@ class TestLists: ): assert method(api) == [{"x": 1}] - def test_api_list(self, app): + def test_api_list(self, app: Flask): api = ToolApiListApi() method = unwrap(api.get) @@ -637,7 +638,7 @@ class TestLists: ): assert method(api) == [{"x": 1}] - def test_workflow_list(self, app): + def test_workflow_list(self, app: Flask): api = ToolWorkflowListApi() method = unwrap(api.get) @@ -660,10 +661,10 @@ class TestLists: class TestLabels: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_labels(self, app): + def test_labels(self, app: Flask): api = ToolLabelsApi() method = unwrap(api.get) @@ -679,10 +680,10 @@ class TestLabels: class TestOAuth: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_oauth_no_client(self, app): + def test_oauth_no_client(self, app: Flask): api = ToolPluginOAuthApi() method = unwrap(api.get) @@ -700,7 +701,7 @@ class TestOAuth: with pytest.raises(Forbidden): method(api, "provider") - def test_oauth_callback_no_cookie(self, app): + def test_oauth_callback_no_cookie(self, app: Flask): api = ToolOAuthCallback() method = unwrap(api.get) @@ -711,10 +712,10 @@ class TestOAuth: class TestOAuthCustomClient: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_save_custom_client(self, app): + def test_save_custom_client(self, app: Flask): api = ToolOAuthCustomClient() method = unwrap(api.post) @@ -731,7 +732,7 @@ class 
TestOAuthCustomClient: ): assert method(api, "provider")["ok"] - def test_get_custom_client(self, app): + def test_get_custom_client(self, app: Flask): api = ToolOAuthCustomClient() method = unwrap(api.get) @@ -748,7 +749,7 @@ class TestOAuthCustomClient: ): assert method(api, "provider") == {"client_id": "x"} - def test_delete_custom_client(self, app): + def test_delete_custom_client(self, app: Flask): api = ToolOAuthCustomClient() method = unwrap(api.delete) diff --git a/api/tests/test_containers_integration_tests/controllers/console/workspace/test_trigger_providers.py b/api/tests/test_containers_integration_tests/controllers/console/workspace/test_trigger_providers.py index ca8195af53..e41adccf3c 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/workspace/test_trigger_providers.py +++ b/api/tests/test_containers_integration_tests/controllers/console/workspace/test_trigger_providers.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, Forbidden from controllers.console.workspace.trigger_providers import ( @@ -45,10 +46,10 @@ def mock_user(): class TestTriggerProviderApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_icon_success(self, app): + def test_icon_success(self, app: Flask): api = TriggerProviderIconApi() method = unwrap(api.get) @@ -62,7 +63,7 @@ class TestTriggerProviderApis: ): assert method(api, "github") == "icon" - def test_list_providers(self, app): + def test_list_providers(self, app: Flask): api = TriggerProviderListApi() method = unwrap(api.get) @@ -76,7 +77,7 @@ class TestTriggerProviderApis: ): assert method(api) == [] - def test_provider_info(self, app): + def test_provider_info(self, app: Flask): api = TriggerProviderInfoApi() method = unwrap(api.get) @@ -93,10 +94,10 @@ class 
TestTriggerProviderApis: class TestTriggerSubscriptionListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_list_success(self, app): + def test_list_success(self, app: Flask): api = TriggerSubscriptionListApi() method = unwrap(api.get) @@ -110,7 +111,7 @@ class TestTriggerSubscriptionListApi: ): assert method(api, "github") == [] - def test_list_invalid_provider(self, app): + def test_list_invalid_provider(self, app: Flask): api = TriggerSubscriptionListApi() method = unwrap(api.get) @@ -128,10 +129,10 @@ class TestTriggerSubscriptionListApi: class TestTriggerSubscriptionBuilderApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_create_builder(self, app): + def test_create_builder(self, app: Flask): api = TriggerSubscriptionBuilderCreateApi() method = unwrap(api.post) @@ -146,7 +147,7 @@ class TestTriggerSubscriptionBuilderApis: result = method(api, "github") assert "subscription_builder" in result - def test_get_builder(self, app): + def test_get_builder(self, app: Flask): api = TriggerSubscriptionBuilderGetApi() method = unwrap(api.get) @@ -159,7 +160,7 @@ class TestTriggerSubscriptionBuilderApis: ): assert method(api, "github", "b1") == {"id": "b1"} - def test_verify_builder(self, app): + def test_verify_builder(self, app: Flask): api = TriggerSubscriptionBuilderVerifyApi() method = unwrap(api.post) @@ -173,7 +174,7 @@ class TestTriggerSubscriptionBuilderApis: ): assert method(api, "github", "b1") == {"ok": True} - def test_verify_builder_error(self, app): + def test_verify_builder_error(self, app: Flask): api = TriggerSubscriptionBuilderVerifyApi() method = unwrap(api.post) @@ -188,7 +189,7 @@ class TestTriggerSubscriptionBuilderApis: with pytest.raises(ValueError): method(api, "github", "b1") - def test_update_builder(self, app): + def 
test_update_builder(self, app: Flask): api = TriggerSubscriptionBuilderUpdateApi() method = unwrap(api.post) @@ -202,7 +203,7 @@ class TestTriggerSubscriptionBuilderApis: ): assert method(api, "github", "b1") == {"id": "b1"} - def test_logs(self, app): + def test_logs(self, app: Flask): api = TriggerSubscriptionBuilderLogsApi() method = unwrap(api.get) @@ -219,7 +220,7 @@ class TestTriggerSubscriptionBuilderApis: ): assert "logs" in method(api, "github", "b1") - def test_build(self, app): + def test_build(self, app: Flask): api = TriggerSubscriptionBuilderBuildApi() method = unwrap(api.post) @@ -236,10 +237,10 @@ class TestTriggerSubscriptionBuilderApis: class TestTriggerSubscriptionCrud: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_update_rename_only(self, app): + def test_update_rename_only(self, app: Flask): api = TriggerSubscriptionUpdateApi() method = unwrap(api.post) @@ -258,7 +259,7 @@ class TestTriggerSubscriptionCrud: ): assert method(api, "s1") == 200 - def test_update_not_found(self, app): + def test_update_not_found(self, app: Flask): api = TriggerSubscriptionUpdateApi() method = unwrap(api.post) @@ -273,7 +274,7 @@ class TestTriggerSubscriptionCrud: with pytest.raises(NotFoundError): method(api, "x") - def test_update_rebuild(self, app): + def test_update_rebuild(self, app: Flask): api = TriggerSubscriptionUpdateApi() method = unwrap(api.post) @@ -296,7 +297,7 @@ class TestTriggerSubscriptionCrud: ): assert method(api, "s1") == 200 - def test_delete_subscription(self, app): + def test_delete_subscription(self, app: Flask): api = TriggerSubscriptionDeleteApi() method = unwrap(api.post) @@ -319,7 +320,7 @@ class TestTriggerSubscriptionCrud: assert result["result"] == "success" - def test_delete_subscription_value_error(self, app): + def test_delete_subscription_value_error(self, app: Flask): api = TriggerSubscriptionDeleteApi() method = 
unwrap(api.post) @@ -342,10 +343,10 @@ class TestTriggerSubscriptionCrud: class TestTriggerOAuthApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_oauth_authorize_success(self, app): + def test_oauth_authorize_success(self, app: Flask): api = TriggerOAuthAuthorizeApi() method = unwrap(api.get) @@ -372,7 +373,7 @@ class TestTriggerOAuthApis: resp = method(api, "github") assert resp.status_code == 200 - def test_oauth_authorize_no_client(self, app): + def test_oauth_authorize_no_client(self, app: Flask): api = TriggerOAuthAuthorizeApi() method = unwrap(api.get) @@ -387,7 +388,7 @@ class TestTriggerOAuthApis: with pytest.raises(NotFoundError): method(api, "github") - def test_oauth_callback_forbidden(self, app): + def test_oauth_callback_forbidden(self, app: Flask): api = TriggerOAuthCallbackApi() method = unwrap(api.get) @@ -395,7 +396,7 @@ class TestTriggerOAuthApis: with pytest.raises(Forbidden): method(api, "github") - def test_oauth_callback_success(self, app): + def test_oauth_callback_success(self, app: Flask): api = TriggerOAuthCallbackApi() method = unwrap(api.get) @@ -425,7 +426,7 @@ class TestTriggerOAuthApis: resp = method(api, "github") assert resp.status_code == 302 - def test_oauth_callback_no_oauth_client(self, app): + def test_oauth_callback_no_oauth_client(self, app: Flask): api = TriggerOAuthCallbackApi() method = unwrap(api.get) @@ -449,7 +450,7 @@ class TestTriggerOAuthApis: with pytest.raises(Forbidden): method(api, "github") - def test_oauth_callback_empty_credentials(self, app): + def test_oauth_callback_empty_credentials(self, app: Flask): api = TriggerOAuthCallbackApi() method = unwrap(api.get) @@ -480,10 +481,10 @@ class TestTriggerOAuthApis: class TestTriggerOAuthClientManageApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def 
test_get_client(self, app): + def test_get_client(self, app: Flask): api = TriggerOAuthClientManageApi() method = unwrap(api.get) @@ -510,7 +511,7 @@ class TestTriggerOAuthClientManageApi: result = method(api, "github") assert "configured" in result - def test_post_client(self, app): + def test_post_client(self, app: Flask): api = TriggerOAuthClientManageApi() method = unwrap(api.post) @@ -524,7 +525,7 @@ class TestTriggerOAuthClientManageApi: ): assert method(api, "github") == {"ok": True} - def test_delete_client(self, app): + def test_delete_client(self, app: Flask): api = TriggerOAuthClientManageApi() method = unwrap(api.delete) @@ -538,7 +539,7 @@ class TestTriggerOAuthClientManageApi: ): assert method(api, "github") == {"ok": True} - def test_oauth_client_post_value_error(self, app): + def test_oauth_client_post_value_error(self, app: Flask): api = TriggerOAuthClientManageApi() method = unwrap(api.post) @@ -556,10 +557,10 @@ class TestTriggerOAuthClientManageApi: class TestTriggerSubscriptionVerifyApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_verify_success(self, app): + def test_verify_success(self, app: Flask): api = TriggerSubscriptionVerifyApi() method = unwrap(api.post) diff --git a/api/tests/test_containers_integration_tests/controllers/service_api/dataset/test_dataset.py b/api/tests/test_containers_integration_tests/controllers/service_api/dataset/test_dataset.py index 9b913d6d3d..b73d28e4c4 100644 --- a/api/tests/test_containers_integration_tests/controllers/service_api/dataset/test_dataset.py +++ b/api/tests/test_containers_integration_tests/controllers/service_api/dataset/test_dataset.py @@ -18,6 +18,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from sqlalchemy.orm import Session from werkzeug.exceptions import Forbidden, NotFound @@ -217,10 +218,20 @@ class 
TestTagUnbindingPayload: """Test suite for TagUnbindingPayload Pydantic model.""" def test_payload_with_valid_data(self): - payload = TagUnbindingPayload(tag_id="tag_123", target_id="dataset_456") - assert payload.tag_id == "tag_123" + payload = TagUnbindingPayload(tag_ids=["tag_123"], target_id="dataset_456") + assert payload.tag_ids == ["tag_123"] assert payload.target_id == "dataset_456" + def test_payload_normalizes_legacy_tag_id(self): + payload = TagUnbindingPayload(tag_id="tag_123", target_id="dataset_456") + assert payload.tag_ids == ["tag_123"] + assert payload.target_id == "dataset_456" + + def test_payload_rejects_empty_tag_ids(self): + with pytest.raises(ValueError) as exc_info: + TagUnbindingPayload(tag_ids=[], target_id="dataset_456") + assert "Tag IDs is required" in str(exc_info.value) + # --------------------------------------------------------------------------- # Helpers @@ -236,7 +247,7 @@ def _unwrap(method): @pytest.fixture -def app(flask_app_with_containers): +def app(flask_app_with_containers: Flask): # Uses the full containerised app so that Flask config, extensions, and # blueprint registrations match production. Most tests mock the service # layer to isolate controller logic; a few (e.g. 
test_list_tags_from_db) @@ -280,7 +291,7 @@ class TestDatasetListApiGet: mock_current_user, mock_provider_mgr, mock_marshal, - app, + app: Flask, mock_tenant, ): from controllers.service_api.dataset.dataset import DatasetListApi @@ -315,7 +326,7 @@ class TestDatasetListApiPost: mock_dataset_svc, mock_current_user, mock_marshal, - app, + app: Flask, mock_tenant, ): from controllers.service_api.dataset.dataset import DatasetListApi @@ -341,7 +352,7 @@ class TestDatasetListApiPost: self, mock_dataset_svc, mock_current_user, - app, + app: Flask, mock_tenant, ): from controllers.service_api.dataset.dataset import DatasetListApi @@ -379,7 +390,7 @@ class TestDatasetApiGet: mock_provider_mgr, mock_marshal, mock_perm_svc, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -429,7 +440,7 @@ class TestDatasetApiGet: self, mock_dataset_svc, mock_current_user, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -457,7 +468,7 @@ class TestDatasetApiDelete: mock_dataset_svc, mock_current_user, mock_perm_svc, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -479,7 +490,7 @@ class TestDatasetApiDelete: self, mock_dataset_svc, mock_current_user, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -500,7 +511,7 @@ class TestDatasetApiDelete: self, mock_dataset_svc, mock_current_user, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -532,7 +543,7 @@ class TestDocumentStatusApiPatch: mock_dataset_svc, mock_current_user, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -563,7 +574,7 @@ class TestDocumentStatusApiPatch: def test_batch_update_status_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -592,7 +603,7 @@ class TestDocumentStatusApiPatch: 
mock_dataset_svc, mock_current_user, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -625,7 +636,7 @@ class TestDocumentStatusApiPatch: mock_dataset_svc, mock_current_user, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -658,7 +669,7 @@ class TestDocumentStatusApiPatch: mock_dataset_svc, mock_current_user, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -698,7 +709,7 @@ class TestDatasetTagsApiGet: self, mock_current_user, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsApi @@ -720,7 +731,7 @@ class TestDatasetTagsApiGet: def test_list_tags_from_db( self, mock_current_user, - app, + app: Flask, db_session_with_containers: Session, ): """Integration test: creates real Tag rows and retrieves them @@ -763,7 +774,7 @@ class TestDatasetTagsApiPost: self, mock_current_user, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsApi @@ -786,7 +797,7 @@ class TestDatasetTagsApiPost: mock_tag_svc.save_tags.assert_called_once() @patch("controllers.service_api.dataset.dataset.current_user") - def test_create_tag_forbidden(self, mock_current_user, app): + def test_create_tag_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import DatasetTagsApi mock_current_user.__class__ = Account @@ -815,7 +826,7 @@ class TestDatasetTagsApiPatch: mock_current_user, mock_service_api_ns, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsApi @@ -841,7 +852,7 @@ class TestDatasetTagsApiPatch: mock_tag_svc.update_tags.assert_called_once_with({"name": "Updated Tag", "type": "knowledge"}, "tag-1") @patch("controllers.service_api.dataset.dataset.current_user") - def test_update_tag_forbidden(self, mock_current_user, app): + def test_update_tag_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import 
DatasetTagsApi mock_current_user.__class__ = Account @@ -869,7 +880,7 @@ class TestDatasetTagsApiDelete: mock_current_user, mock_service_api_ns, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsApi @@ -894,7 +905,7 @@ class TestDatasetTagsApiDelete: mock_tag_svc.delete_tag.assert_called_once_with("tag-1") @patch("libs.login.current_user") - def test_delete_tag_forbidden(self, mock_current_user, app): + def test_delete_tag_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import DatasetTagsApi user_obj = Mock(spec=Account) @@ -922,7 +933,7 @@ class TestDatasetTagsBindingStatusApi: self, mock_current_user, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsBindingStatusApi @@ -952,7 +963,7 @@ class TestDatasetTagBindingApiPost: self, mock_current_user, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagBindingApi @@ -977,7 +988,7 @@ class TestDatasetTagBindingApiPost: ) @patch("controllers.service_api.dataset.dataset.current_user") - def test_bind_tags_forbidden(self, mock_current_user, app): + def test_bind_tags_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import DatasetTagBindingApi mock_current_user.__class__ = Account @@ -1003,7 +1014,37 @@ class TestDatasetTagUnbindingApiPost: self, mock_current_user, mock_tag_svc, - app, + app: Flask, + ): + from controllers.service_api.dataset.dataset import DatasetTagUnbindingApi + + mock_current_user.__class__ = Account + mock_current_user.has_edit_permission = True + mock_current_user.is_dataset_editor = True + mock_tag_svc.delete_tag_binding.return_value = None + + with app.test_request_context( + "/datasets/tags/unbinding", + method="POST", + json={"tag_ids": ["tag-1"], "target_id": "ds-1"}, + ): + api = DatasetTagUnbindingApi() + result = api.post(_=None) + + assert result == ("", 
204) + from services.tag_service import TagBindingDeletePayload + + mock_tag_svc.delete_tag_binding.assert_called_once_with( + TagBindingDeletePayload(tag_ids=["tag-1"], target_id="ds-1", type="knowledge") + ) + + @patch("controllers.service_api.dataset.dataset.TagService") + @patch("controllers.service_api.dataset.dataset.current_user") + def test_unbind_legacy_tag_id_success( + self, + mock_current_user, + mock_tag_svc, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagUnbindingApi @@ -1024,11 +1065,11 @@ class TestDatasetTagUnbindingApiPost: from services.tag_service import TagBindingDeletePayload mock_tag_svc.delete_tag_binding.assert_called_once_with( - TagBindingDeletePayload(tag_id="tag-1", target_id="ds-1", type="knowledge") + TagBindingDeletePayload(tag_ids=["tag-1"], target_id="ds-1", type="knowledge") ) @patch("controllers.service_api.dataset.dataset.current_user") - def test_unbind_tag_forbidden(self, mock_current_user, app): + def test_unbind_tag_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import DatasetTagUnbindingApi mock_current_user.__class__ = Account @@ -1038,7 +1079,7 @@ class TestDatasetTagUnbindingApiPost: with app.test_request_context( "/datasets/tags/unbinding", method="POST", - json={"tag_id": "tag-1", "target_id": "ds-1"}, + json={"tag_ids": ["tag-1"], "target_id": "ds-1"}, ): api = DatasetTagUnbindingApi() with pytest.raises(Forbidden): diff --git a/api/tests/test_containers_integration_tests/controllers/web/test_conversation.py b/api/tests/test_containers_integration_tests/controllers/web/test_conversation.py index e1e6741014..c34da27ebe 100644 --- a/api/tests/test_containers_integration_tests/controllers/web/test_conversation.py +++ b/api/tests/test_containers_integration_tests/controllers/web/test_conversation.py @@ -7,6 +7,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from werkzeug.exceptions 
import NotFound from controllers.web.conversation import ( @@ -34,16 +35,16 @@ def _end_user() -> SimpleNamespace: class TestConversationListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context("/conversations"): with pytest.raises(NotChatAppError): ConversationListApi().get(_completion_app(), _end_user()) @patch("controllers.web.conversation.WebConversationService.pagination_by_last_id") - def test_happy_path(self, mock_paginate: MagicMock, app) -> None: + def test_happy_path(self, mock_paginate: MagicMock, app: Flask) -> None: conv_id = str(uuid4()) conv = SimpleNamespace( id=conv_id, @@ -65,16 +66,16 @@ class TestConversationListApi: class TestConversationApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context(f"/conversations/{uuid4()}"): with pytest.raises(NotChatAppError): ConversationApi().delete(_completion_app(), _end_user(), uuid4()) @patch("controllers.web.conversation.ConversationService.delete") - def test_delete_success(self, mock_delete: MagicMock, app) -> None: + def test_delete_success(self, mock_delete: MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}"): result, status = ConversationApi().delete(_chat_app(), _end_user(), c_id) @@ -83,7 +84,7 @@ class TestConversationApi: assert result["result"] == "success" @patch("controllers.web.conversation.ConversationService.delete", side_effect=ConversationNotExistsError()) - def test_delete_not_found(self, mock_delete: MagicMock, app) -> None: + def test_delete_not_found(self, mock_delete: 
MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}"): with pytest.raises(NotFound, match="Conversation Not Exists"): @@ -92,17 +93,17 @@ class TestConversationApi: class TestConversationRenameApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context(f"/conversations/{uuid4()}/name", method="POST", json={"name": "x"}): with pytest.raises(NotChatAppError): ConversationRenameApi().post(_completion_app(), _end_user(), uuid4()) @patch("controllers.web.conversation.ConversationService.rename") @patch("controllers.web.conversation.web_ns") - def test_rename_success(self, mock_ns: MagicMock, mock_rename: MagicMock, app) -> None: + def test_rename_success(self, mock_ns: MagicMock, mock_rename: MagicMock, app: Flask) -> None: c_id = uuid4() mock_ns.payload = {"name": "New Name", "auto_generate": False} conv = SimpleNamespace( @@ -126,7 +127,7 @@ class TestConversationRenameApi: side_effect=ConversationNotExistsError(), ) @patch("controllers.web.conversation.web_ns") - def test_rename_not_found(self, mock_ns: MagicMock, mock_rename: MagicMock, app) -> None: + def test_rename_not_found(self, mock_ns: MagicMock, mock_rename: MagicMock, app: Flask) -> None: c_id = uuid4() mock_ns.payload = {"name": "X", "auto_generate": False} @@ -137,16 +138,16 @@ class TestConversationRenameApi: class TestConversationPinApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context(f"/conversations/{uuid4()}/pin", method="PATCH"): with pytest.raises(NotChatAppError): 
ConversationPinApi().patch(_completion_app(), _end_user(), uuid4()) @patch("controllers.web.conversation.WebConversationService.pin") - def test_pin_success(self, mock_pin: MagicMock, app) -> None: + def test_pin_success(self, mock_pin: MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}/pin", method="PATCH"): result = ConversationPinApi().patch(_chat_app(), _end_user(), c_id) @@ -154,7 +155,7 @@ class TestConversationPinApi: assert result["result"] == "success" @patch("controllers.web.conversation.WebConversationService.pin", side_effect=ConversationNotExistsError()) - def test_pin_not_found(self, mock_pin: MagicMock, app) -> None: + def test_pin_not_found(self, mock_pin: MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}/pin", method="PATCH"): with pytest.raises(NotFound): @@ -163,16 +164,16 @@ class TestConversationPinApi: class TestConversationUnPinApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context(f"/conversations/{uuid4()}/unpin", method="PATCH"): with pytest.raises(NotChatAppError): ConversationUnPinApi().patch(_completion_app(), _end_user(), uuid4()) @patch("controllers.web.conversation.WebConversationService.unpin") - def test_unpin_success(self, mock_unpin: MagicMock, app) -> None: + def test_unpin_success(self, mock_unpin: MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}/unpin", method="PATCH"): result = ConversationUnPinApi().patch(_chat_app(), _end_user(), c_id) diff --git a/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py b/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py index 
635cfee2da..2c6a990240 100644 --- a/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py +++ b/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py @@ -7,6 +7,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.web.forgot_password import ( ForgotPasswordCheckApi, @@ -29,7 +30,7 @@ def _patch_wraps(): class TestForgotPasswordSendEmailApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @patch("controllers.web.forgot_password.AccountService.send_reset_password_email") @@ -42,7 +43,7 @@ class TestForgotPasswordSendEmailApi: mock_rate_limit, mock_get_account, mock_send_mail, - app, + app: Flask, ): mock_account = MagicMock() mock_get_account.return_value = mock_account @@ -64,7 +65,7 @@ class TestForgotPasswordSendEmailApi: class TestForgotPasswordCheckApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @patch("controllers.web.forgot_password.AccountService.reset_forgot_password_error_rate_limit") @@ -81,7 +82,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_generate_token, mock_reset_rate, - app, + app: Flask, ): mock_is_rate_limit.return_value = False mock_get_data.return_value = {"email": "User@Example.com", "code": "1234"} @@ -117,7 +118,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_generate_token, mock_reset_rate, - app, + app: Flask, ): mock_is_rate_limit.return_value = False mock_get_data.return_value = {"email": "MixedCase@Example.com", "code": "5678"} @@ -142,7 +143,7 @@ class TestForgotPasswordCheckApi: class TestForgotPasswordResetApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers 
@patch("controllers.web.forgot_password.ForgotPasswordResetApi._update_existing_account") @@ -157,7 +158,7 @@ class TestForgotPasswordResetApi: mock_db, mock_get_account, mock_update_account, - app, + app: Flask, ): mock_get_reset_data.return_value = {"phase": "reset", "email": "User@Example.com", "code": "1234"} mock_account = MagicMock() @@ -194,7 +195,7 @@ class TestForgotPasswordResetApi: mock_db, mock_token_bytes, mock_hash_password, - app, + app: Flask, ): mock_get_reset_data.return_value = {"phase": "reset", "email": "user@example.com"} account = MagicMock() diff --git a/api/tests/test_containers_integration_tests/controllers/web/test_wraps.py b/api/tests/test_containers_integration_tests/controllers/web/test_wraps.py index 19833cc772..0a4e495f36 100644 --- a/api/tests/test_containers_integration_tests/controllers/web/test_wraps.py +++ b/api/tests/test_containers_integration_tests/controllers/web/test_wraps.py @@ -8,6 +8,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from werkzeug.exceptions import BadRequest, NotFound, Unauthorized @@ -182,7 +183,7 @@ class TestValidateUserAccessibility: class TestDecodeJwtToken: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def _create_app_site_enduser(self, db_session: Session, *, enable_site: bool = True): @@ -239,7 +240,7 @@ class TestDecodeJwtToken: mock_access_mode: MagicMock, mock_validate_token: MagicMock, mock_validate_user: MagicMock, - app, + app: Flask, db_session_with_containers: Session, ) -> None: app_model, site, end_user = self._create_app_site_enduser(db_session_with_containers) @@ -299,7 +300,7 @@ class TestDecodeJwtToken: mock_extract: MagicMock, mock_passport_cls: MagicMock, mock_features: MagicMock, - app, + app: Flask, db_session_with_containers: Session, ) -> None: app_model, site, end_user = 
self._create_app_site_enduser(db_session_with_containers, enable_site=False) @@ -324,7 +325,7 @@ class TestDecodeJwtToken: mock_extract: MagicMock, mock_passport_cls: MagicMock, mock_features: MagicMock, - app, + app: Flask, db_session_with_containers: Session, ) -> None: app_model, site, _ = self._create_app_site_enduser(db_session_with_containers) @@ -350,7 +351,7 @@ class TestDecodeJwtToken: mock_extract: MagicMock, mock_passport_cls: MagicMock, mock_features: MagicMock, - app, + app: Flask, db_session_with_containers: Session, ) -> None: app_model, site, end_user = self._create_app_site_enduser(db_session_with_containers) diff --git a/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py b/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py index c342e8994b..bd13527e14 100644 --- a/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py +++ b/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py @@ -85,7 +85,7 @@ class TestPauseStatePersistenceLayerTestContainers: return WorkflowRunService(engine) @pytest.fixture(autouse=True) - def setup_test_data(self, db_session_with_containers, file_service, workflow_run_service): + def setup_test_data(self, db_session_with_containers: Session, file_service, workflow_run_service): """Set up test data for each test method using TestContainers.""" # Create test tenant and account from models.account import AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus @@ -295,7 +295,7 @@ class TestPauseStatePersistenceLayerTestContainers: generate_entity=entity, ) - def test_complete_pause_flow_with_real_dependencies(self, db_session_with_containers): + def test_complete_pause_flow_with_real_dependencies(self, db_session_with_containers: Session): """Test complete pause flow: event -> state serialization -> database save -> storage save.""" # Arrange layer = 
self._create_pause_state_persistence_layer() @@ -352,7 +352,7 @@ class TestPauseStatePersistenceLayerTestContainers: assert isinstance(persisted_entity, WorkflowAppGenerateEntity) assert persisted_entity.workflow_execution_id == self.test_workflow_run_id - def test_state_persistence_and_retrieval(self, db_session_with_containers): + def test_state_persistence_and_retrieval(self, db_session_with_containers: Session): """Test that pause state can be persisted and retrieved correctly.""" # Arrange layer = self._create_pause_state_persistence_layer() @@ -402,7 +402,7 @@ class TestPauseStatePersistenceLayerTestContainers: assert retrieved_state["node_run_steps"] == 10 assert resumption_context.get_generate_entity().workflow_execution_id == self.test_workflow_run_id - def test_database_transaction_handling(self, db_session_with_containers): + def test_database_transaction_handling(self, db_session_with_containers: Session): """Test that database transactions are handled correctly.""" # Arrange layer = self._create_pause_state_persistence_layer() @@ -433,7 +433,7 @@ class TestPauseStatePersistenceLayerTestContainers: assert pause_model.resumed_at is None assert pause_model.state_object_key != "" - def test_file_storage_integration(self, db_session_with_containers): + def test_file_storage_integration(self, db_session_with_containers: Session): """Test integration with file storage system.""" # Arrange layer = self._create_pause_state_persistence_layer() @@ -467,7 +467,7 @@ class TestPauseStatePersistenceLayerTestContainers: assert resumption_context.serialized_graph_runtime_state == graph_runtime_state.dumps() assert resumption_context.get_generate_entity().workflow_execution_id == self.test_workflow_run_id - def test_workflow_with_different_creators(self, db_session_with_containers): + def test_workflow_with_different_creators(self, db_session_with_containers: Session): """Test pause state with workflows created by different users.""" # Arrange - Create workflow with 
different creator different_user_id = str(uuid.uuid4()) @@ -532,7 +532,7 @@ class TestPauseStatePersistenceLayerTestContainers: resumption_context = WorkflowResumptionContext.loads(pause_entity.get_state().decode()) assert resumption_context.get_generate_entity().workflow_execution_id == different_workflow_run.id - def test_layer_ignores_non_pause_events(self, db_session_with_containers): + def test_layer_ignores_non_pause_events(self, db_session_with_containers: Session): """Test that layer ignores non-pause events.""" # Arrange layer = self._create_pause_state_persistence_layer() @@ -562,7 +562,7 @@ class TestPauseStatePersistenceLayerTestContainers: ).all() assert len(pause_states) == 0 - def test_layer_requires_initialization(self, db_session_with_containers): + def test_layer_requires_initialization(self, db_session_with_containers: Session): """Test that layer requires proper initialization before handling events.""" # Arrange layer = self._create_pause_state_persistence_layer() diff --git a/api/tests/test_containers_integration_tests/core/rag/pipeline/test_queue_integration.py b/api/tests/test_containers_integration_tests/core/rag/pipeline/test_queue_integration.py index a60159c66a..d1af0a56ef 100644 --- a/api/tests/test_containers_integration_tests/core/rag/pipeline/test_queue_integration.py +++ b/api/tests/test_containers_integration_tests/core/rag/pipeline/test_queue_integration.py @@ -15,11 +15,14 @@ from uuid import uuid4 import pytest from faker import Faker +from sqlalchemy.orm import Session from core.rag.pipeline.queue import TaskWrapper, TenantIsolatedTaskQueue from extensions.ext_redis import redis_client from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus +TenantAndAccount = tuple[Tenant, Account] + @dataclass class TestTask: @@ -40,7 +43,7 @@ class TestTenantIsolatedTaskQueueIntegration: return Faker() @pytest.fixture - def test_tenant_and_account(self, db_session_with_containers, fake): + def 
test_tenant_and_account(self, db_session_with_containers: Session, fake: Faker): """Create test tenant and account for testing.""" # Create account account = Account( @@ -73,18 +76,18 @@ class TestTenantIsolatedTaskQueueIntegration: return tenant, account @pytest.fixture - def test_queue(self, test_tenant_and_account): + def test_queue(self, test_tenant_and_account: TenantAndAccount): """Create a generic test queue for testing.""" tenant, _ = test_tenant_and_account return TenantIsolatedTaskQueue(tenant.id, "test_queue") @pytest.fixture - def secondary_queue(self, test_tenant_and_account): + def secondary_queue(self, test_tenant_and_account: TenantAndAccount): """Create a secondary test queue for testing isolation.""" tenant, _ = test_tenant_and_account return TenantIsolatedTaskQueue(tenant.id, "secondary_queue") - def test_queue_initialization(self, test_tenant_and_account): + def test_queue_initialization(self, test_tenant_and_account: TenantAndAccount): """Test queue initialization with correct key generation.""" tenant, _ = test_tenant_and_account queue = TenantIsolatedTaskQueue(tenant.id, "test-key") @@ -94,7 +97,9 @@ class TestTenantIsolatedTaskQueueIntegration: assert queue._queue == f"tenant_self_test-key_task_queue:{tenant.id}" assert queue._task_key == f"tenant_test-key_task:{tenant.id}" - def test_tenant_isolation(self, test_tenant_and_account, db_session_with_containers, fake): + def test_tenant_isolation( + self, test_tenant_and_account: TenantAndAccount, db_session_with_containers: Session, fake: Faker + ): """Test that different tenants have isolated queues.""" tenant1, _ = test_tenant_and_account @@ -114,7 +119,7 @@ class TestTenantIsolatedTaskQueueIntegration: assert queue1._queue == f"tenant_self_same-key_task_queue:{tenant1.id}" assert queue2._queue == f"tenant_self_same-key_task_queue:{tenant2.id}" - def test_key_isolation(self, test_tenant_and_account): + def test_key_isolation(self, test_tenant_and_account: TenantAndAccount): """Test that 
different keys have isolated queues.""" tenant, _ = test_tenant_and_account queue1 = TenantIsolatedTaskQueue(tenant.id, "key1") @@ -176,7 +181,7 @@ class TestTenantIsolatedTaskQueueIntegration: assert len(remaining_tasks) == 2 assert remaining_tasks == ["task4", "task5"] - def test_push_and_pull_complex_objects(self, test_queue, fake): + def test_push_and_pull_complex_objects(self, test_queue, fake: Faker): """Test pushing and pulling complex object tasks.""" # Create complex task objects as dictionaries (not dataclass instances) tasks = [ @@ -218,7 +223,7 @@ class TestTenantIsolatedTaskQueueIntegration: assert pulled_task["data"] == original_task["data"] assert pulled_task["metadata"] == original_task["metadata"] - def test_mixed_task_types(self, test_queue, fake): + def test_mixed_task_types(self, test_queue, fake: Faker): """Test pushing and pulling mixed string and object tasks.""" string_task = "simple_string_task" object_task = { @@ -267,7 +272,7 @@ class TestTenantIsolatedTaskQueueIntegration: # Verify task key has expired assert test_queue.get_task_key() is None - def test_large_task_batch(self, test_queue, fake): + def test_large_task_batch(self, test_queue, fake: Faker): """Test handling large batches of tasks.""" # Create large batch of tasks large_batch = [] @@ -292,7 +297,7 @@ class TestTenantIsolatedTaskQueueIntegration: assert isinstance(task, dict) assert task["index"] == i # FIFO order - def test_queue_operations_isolation(self, test_tenant_and_account, fake): + def test_queue_operations_isolation(self, test_tenant_and_account: TenantAndAccount, fake: Faker): """Test concurrent operations on different queues.""" tenant, _ = test_tenant_and_account @@ -312,7 +317,7 @@ class TestTenantIsolatedTaskQueueIntegration: assert tasks2 == ["task1_queue2", "task2_queue2"] assert tasks1 != tasks2 - def test_task_wrapper_serialization_roundtrip(self, test_queue, fake): + def test_task_wrapper_serialization_roundtrip(self, test_queue, fake: Faker): """Test 
TaskWrapper serialization and deserialization roundtrip.""" # Create complex nested data complex_data = { @@ -346,7 +351,7 @@ class TestTenantIsolatedTaskQueueIntegration: task = test_queue.pull_tasks(1) assert task[0] == invalid_json_task - def test_real_world_batch_processing_scenario(self, test_queue, fake): + def test_real_world_batch_processing_scenario(self, test_queue, fake: Faker): """Test realistic batch processing scenario.""" # Simulate batch processing tasks batch_tasks = [] @@ -403,7 +408,7 @@ class TestTenantIsolatedTaskQueueCompatibility: return Faker() @pytest.fixture - def test_tenant_and_account(self, db_session_with_containers, fake): + def test_tenant_and_account(self, db_session_with_containers: Session, fake: Faker): """Create test tenant and account for testing.""" # Create account account = Account( @@ -435,7 +440,7 @@ class TestTenantIsolatedTaskQueueCompatibility: return tenant, account - def test_legacy_string_queue_compatibility(self, test_tenant_and_account, fake): + def test_legacy_string_queue_compatibility(self, test_tenant_and_account: TenantAndAccount, fake: Faker): """ Test compatibility with legacy queues containing only string data. @@ -465,7 +470,7 @@ class TestTenantIsolatedTaskQueueCompatibility: expected_order = ["legacy_task_1", "legacy_task_2", "legacy_task_3", "legacy_task_4", "legacy_task_5"] assert pulled_tasks == expected_order - def test_legacy_queue_migration_scenario(self, test_tenant_and_account, fake): + def test_legacy_queue_migration_scenario(self, test_tenant_and_account: TenantAndAccount, fake: Faker): """ Test complete migration scenario from legacy to new system. 
@@ -546,7 +551,7 @@ class TestTenantIsolatedTaskQueueCompatibility: assert task["tenant_id"] == tenant.id assert task["processing_type"] == "new_system" - def test_legacy_queue_error_recovery(self, test_tenant_and_account, fake): + def test_legacy_queue_error_recovery(self, test_tenant_and_account: TenantAndAccount, fake: Faker): """ Test error recovery when legacy queue contains malformed data. diff --git a/api/tests/test_containers_integration_tests/core/rag/retrieval/test_dataset_retrieval_integration.py b/api/tests/test_containers_integration_tests/core/rag/retrieval/test_dataset_retrieval_integration.py index 00d7496a40..9da6b04a2c 100644 --- a/api/tests/test_containers_integration_tests/core/rag/retrieval/test_dataset_retrieval_integration.py +++ b/api/tests/test_containers_integration_tests/core/rag/retrieval/test_dataset_retrieval_integration.py @@ -3,6 +3,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from core.rag.retrieval.dataset_retrieval import DatasetRetrieval @@ -15,7 +16,7 @@ from tests.test_containers_integration_tests.helpers import generate_valid_passw class TestGetAvailableDatasetsIntegration: def test_returns_datasets_with_available_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -77,7 +78,7 @@ class TestGetAvailableDatasetsIntegration: assert result[0].name == dataset.name def test_filters_out_datasets_with_only_archived_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -130,7 +131,7 @@ class TestGetAvailableDatasetsIntegration: assert len(result) == 0 def 
test_filters_out_datasets_with_only_disabled_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -183,7 +184,7 @@ class TestGetAvailableDatasetsIntegration: assert len(result) == 0 def test_filters_out_datasets_with_non_completed_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -236,7 +237,7 @@ class TestGetAvailableDatasetsIntegration: assert len(result) == 0 def test_includes_external_datasets_without_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test that external datasets are returned even with no available documents. @@ -280,7 +281,7 @@ class TestGetAvailableDatasetsIntegration: assert result[0].id == dataset.id assert result[0].provider == "external" - def test_filters_by_tenant_id(self, db_session_with_containers, mock_external_service_dependencies): + def test_filters_by_tenant_id(self, db_session_with_containers: Session, mock_external_service_dependencies): # Arrange fake = Faker() @@ -356,7 +357,7 @@ class TestGetAvailableDatasetsIntegration: assert result[0].tenant_id == tenant1.id def test_returns_empty_list_when_no_datasets_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -379,7 +380,9 @@ class TestGetAvailableDatasetsIntegration: # Assert assert result == [] - def test_returns_only_requested_dataset_ids(self, db_session_with_containers, mock_external_service_dependencies): + def test_returns_only_requested_dataset_ids( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): # Arrange 
fake = Faker() @@ -439,7 +442,7 @@ class TestGetAvailableDatasetsIntegration: class TestKnowledgeRetrievalIntegration: def test_knowledge_retrieval_with_available_datasets( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -507,7 +510,7 @@ class TestKnowledgeRetrievalIntegration: assert isinstance(result, list) def test_knowledge_retrieval_no_available_datasets( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -555,7 +558,7 @@ class TestKnowledgeRetrievalIntegration: assert result == [] def test_knowledge_retrieval_rate_limit_exceeded( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() diff --git a/api/tests/test_containers_integration_tests/core/repositories/test_human_input_form_repository_impl.py b/api/tests/test_containers_integration_tests/core/repositories/test_human_input_form_repository_impl.py index 6524d6ce61..676833ce98 100644 --- a/api/tests/test_containers_integration_tests/core/repositories/test_human_input_form_repository_impl.py +++ b/api/tests/test_containers_integration_tests/core/repositories/test_human_input_form_repository_impl.py @@ -17,7 +17,7 @@ from core.workflow.human_input_adapter import ( MemberRecipient, WebAppDeliveryMethod, ) -from graphon.nodes.human_input.entities import FormDefinition, HumanInputNodeData, UserAction +from graphon.nodes.human_input.entities import FormDefinition, HumanInputNodeData, UserActionConfig from models.account import ( Account, AccountStatus, @@ -69,7 +69,7 @@ def _build_form_params(delivery_methods: list[DeliveryChannelConfig]) -> FormCre title="Human Approval", delivery_methods=delivery_methods, form_content="

Approve?

", - user_actions=[UserAction(id="approve", title="Approve")], + user_actions=[UserActionConfig(id="approve", title="Approve")], ) return FormCreateParams( workflow_execution_id=str(uuid4()), @@ -185,7 +185,7 @@ class TestHumanInputFormRepositoryImplWithContainers: title="Human Approval", form_content="

Approve?

", inputs=[], - user_actions=[UserAction(id="approve", title="Approve")], + user_actions=[UserActionConfig(id="approve", title="Approve")], ), rendered_content="

Approve?

", delivery_methods=[], @@ -220,7 +220,7 @@ class TestHumanInputFormRepositoryImplWithContainers: title="Human Approval", form_content="

Approve?

", inputs=[], - user_actions=[UserAction(id="approve", title="Approve")], + user_actions=[UserActionConfig(id="approve", title="Approve")], delivery_methods=[WebAppDeliveryMethod()], ), rendered_content="

Approve?

", diff --git a/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py b/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py index 5aed230cd4..54cf179341 100644 --- a/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py +++ b/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py @@ -21,7 +21,7 @@ from graphon.graph_engine import GraphEngine from graphon.graph_engine.command_channels import InMemoryChannel from graphon.nodes.end.end_node import EndNode from graphon.nodes.end.entities import EndNodeData -from graphon.nodes.human_input.entities import HumanInputNodeData, UserAction +from graphon.nodes.human_input.entities import HumanInputNodeData, UserActionConfig from graphon.nodes.human_input.enums import HumanInputFormStatus from graphon.nodes.human_input.human_input_node import HumanInputNode from graphon.nodes.start.entities import StartNodeData @@ -112,7 +112,7 @@ def _build_graph( form_content="Awaiting human input", inputs=[], user_actions=[ - UserAction(id="continue", title="Continue"), + UserActionConfig(id="continue", title="Continue"), ], ) human_node = HumanInputNode( diff --git a/api/tests/test_containers_integration_tests/helpers/execution_extra_content.py b/api/tests/test_containers_integration_tests/helpers/execution_extra_content.py index 2fd289dfbc..2a1638d126 100644 --- a/api/tests/test_containers_integration_tests/helpers/execution_extra_content.py +++ b/api/tests/test_containers_integration_tests/helpers/execution_extra_content.py @@ -5,7 +5,7 @@ from datetime import timedelta from decimal import Decimal from uuid import uuid4 -from graphon.nodes.human_input.entities import FormDefinition, UserAction +from graphon.nodes.human_input.entities import FormDefinition, UserActionConfig from libs.datetime_utils import naive_utc_now from models.account import Account, 
Tenant, TenantAccountJoin from models.enums import ConversationFromSource, InvokeFrom @@ -116,7 +116,7 @@ def create_human_input_message_fixture(db_session) -> HumanInputMessageFixture: form_definition = FormDefinition( form_content="content", inputs=[], - user_actions=[UserAction(id=action_id, title=action_text)], + user_actions=[UserActionConfig(id=action_id, title=action_text)], rendered_content="Rendered block", expiration_time=naive_utc_now() + timedelta(days=1), node_title=node_title, diff --git a/api/tests/test_containers_integration_tests/libs/broadcast_channel/redis/test_sharded_channel.py b/api/tests/test_containers_integration_tests/libs/broadcast_channel/redis/test_sharded_channel.py index 43915a204d..84c1d0ca41 100644 --- a/api/tests/test_containers_integration_tests/libs/broadcast_channel/redis/test_sharded_channel.py +++ b/api/tests/test_containers_integration_tests/libs/broadcast_channel/redis/test_sharded_channel.py @@ -8,6 +8,7 @@ Covers real Redis 7+ sharded pub/sub interactions including: - Resource cleanup accounting via PUBSUB SHARDNUMSUB """ +import socket import threading import time import uuid @@ -356,10 +357,17 @@ class TestShardedRedisBroadcastChannelClusterIntegration: def _get_test_topic_name(cls) -> str: return f"test_sharded_cluster_topic_{uuid.uuid4()}" + @staticmethod + def _resolve_announced_ip(host: str) -> str: + """Resolve the container host name to a literal IP accepted by Redis cluster config.""" + return socket.getaddrinfo(host, None, type=socket.SOCK_STREAM)[0][4][0] + @staticmethod def _ensure_single_node_cluster(host: str, port: int) -> None: + """Bootstrap a single-node cluster using a literal IP for Redis node advertisement.""" client = redis.Redis(host=host, port=port, decode_responses=False) - client.config_set("cluster-announce-ip", host) + announced_ip = TestShardedRedisBroadcastChannelClusterIntegration._resolve_announced_ip(host) + client.config_set("cluster-announce-ip", announced_ip) 
client.config_set("cluster-announce-port", port) slots = client.execute_command("CLUSTER", "SLOTS") if not slots: diff --git a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py index 178fc2e4fb..390795486b 100644 --- a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py +++ b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py @@ -11,7 +11,7 @@ from libs import helper as helper_module @pytest.mark.usefixtures("flask_app_with_containers") -def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch): +def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch: pytest.MonkeyPatch): prefix = f"test_rate_limit:{uuid.uuid4().hex}" limiter = helper_module.RateLimiter(prefix=prefix, max_attempts=2, time_window=60) key = limiter._get_key("203.0.113.10") diff --git a/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py b/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py index d9828e19c5..b3df7f512a 100644 --- a/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py +++ b/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py @@ -17,8 +17,8 @@ from extensions.ext_storage import storage from graphon.entities import WorkflowExecution from graphon.entities.pause_reason import HumanInputRequired, PauseReasonType from graphon.enums import WorkflowExecutionStatus -from graphon.nodes.human_input.entities import FormDefinition, FormInput, UserAction -from graphon.nodes.human_input.enums import FormInputType, HumanInputFormStatus +from graphon.nodes.human_input.entities import FormDefinition, ParagraphInputConfig, UserActionConfig +from graphon.nodes.human_input.enums import 
HumanInputFormStatus from libs.datetime_utils import naive_utc_now from models.enums import CreatorUserRole, WorkflowRunTriggeredFrom from models.human_input import ( @@ -642,8 +642,8 @@ class TestBuildHumanInputRequiredReason: expiration_time = naive_utc_now() form_definition = FormDefinition( form_content="content", - inputs=[FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="name")], - user_actions=[UserAction(id="approve", title="Approve")], + inputs=[ParagraphInputConfig(output_variable_name="name")], + user_actions=[UserActionConfig(id="approve", title="Approve")], rendered_content="rendered", expiration_time=expiration_time, default_values={"name": "Alice"}, diff --git a/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_execution_extra_content_repository.py b/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_execution_extra_content_repository.py index 54b7afc018..1b26a798a1 100644 --- a/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_execution_extra_content_repository.py +++ b/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_execution_extra_content_repository.py @@ -5,6 +5,7 @@ Part of #32454 — replaces the mock-based unit tests with real database interac from __future__ import annotations +import json from collections.abc import Generator from dataclasses import dataclass from datetime import timedelta @@ -15,7 +16,7 @@ import pytest from sqlalchemy import Engine, delete, select from sqlalchemy.orm import Session, sessionmaker -from graphon.nodes.human_input.entities import FormDefinition, UserAction +from graphon.nodes.human_input.entities import FormDefinition, UserActionConfig from graphon.nodes.human_input.enums import HumanInputFormStatus from libs.datetime_utils import naive_utc_now from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole @@ -174,12 +175,16 @@ def _create_submitted_form( action_id: str = "approve", 
action_title: str = "Approve", node_title: str = "Approval", + form_content: str = "content", + rendered_content: str | None = None, + inputs: list[dict] | None = None, + submitted_data: dict | None = None, ) -> HumanInputForm: expiration_time = naive_utc_now() + timedelta(days=1) form_definition = FormDefinition( - form_content="content", - inputs=[], - user_actions=[UserAction(id=action_id, title=action_title)], + form_content=form_content, + inputs=inputs or [], + user_actions=[UserActionConfig(id=action_id, title=action_title)], rendered_content="rendered", expiration_time=expiration_time, node_title=node_title, @@ -191,10 +196,12 @@ def _create_submitted_form( workflow_run_id=workflow_run_id, node_id="node-id", form_definition=form_definition.model_dump_json(), - rendered_content=f"Rendered {action_title}", + rendered_content=rendered_content or f"Rendered {action_title}", status=HumanInputFormStatus.SUBMITTED, expiration_time=expiration_time, selected_action_id=action_id, + submitted_data=None if submitted_data is None else json.dumps(submitted_data), + submitted_at=naive_utc_now(), ) session.add(form) session.flush() @@ -212,7 +219,7 @@ def _create_waiting_form( form_definition = FormDefinition( form_content="content", inputs=[], - user_actions=[UserAction(id="approve", title="Approve")], + user_actions=[UserActionConfig(id="approve", title="Approve")], rendered_content="rendered", expiration_time=expiration_time, default_values=default_values or {"name": "John"}, @@ -349,6 +356,127 @@ class TestGetByMessageIds: # msg2 has no content assert result[1] == [] + def test_submitted_content_populates_submission_data_from_stored_form_data( + self, + db_session_with_containers: Session, + repository: SQLAlchemyExecutionExtraContentRepository, + test_scope: _TestScope, + ) -> None: + workflow_run_id = str(uuid4()) + conversation = _create_conversation(db_session_with_containers, test_scope) + msg = _create_message(db_session_with_containers, test_scope, 
conversation.id, workflow_run_id) + stored_submission_data = {"decision": "approve", "comment": "Looks good"} + form = _create_submitted_form( + db_session_with_containers, + test_scope, + workflow_run_id=workflow_run_id, + submitted_data=stored_submission_data, + ) + _create_human_input_content( + db_session_with_containers, + workflow_run_id=workflow_run_id, + message_id=msg.id, + form_id=form.id, + ) + db_session_with_containers.commit() + + result = repository.get_by_message_ids([msg.id]) + + content = result[0][0] + assert content.form_submission_data is not None + assert content.form_submission_data.submitted_data == stored_submission_data + + def test_submitted_content_exposes_select_and_file_form_data( + self, + db_session_with_containers: Session, + repository: SQLAlchemyExecutionExtraContentRepository, + test_scope: _TestScope, + ) -> None: + workflow_run_id = str(uuid4()) + conversation = _create_conversation(db_session_with_containers, test_scope) + msg = _create_message(db_session_with_containers, test_scope, conversation.id, workflow_run_id) + submitted_data = { + "decision": "approve", + "attachment": { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/file.txt", + "filename": "file.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + "attachments": [ + { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/first.txt", + "filename": "first.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/second.txt", + "filename": "second.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + ], + } + form = _create_submitted_form( + db_session_with_containers, + test_scope, + workflow_run_id=workflow_run_id, + form_content=( + "Decision: {{#$output.decision#}}\n" + "Attachment: {{#$output.attachment#}}\n" + "Attachments: {{#$output.attachments#}}" + 
), + rendered_content=( + "Decision: {{#$output.decision#}}\n" + "Attachment: {{#$output.attachment#}}\n" + "Attachments: {{#$output.attachments#}}" + ), + inputs=[ + { + "type": "select", + "output_variable_name": "decision", + "option_source": {"type": "constant", "value": ["approve", "reject"]}, + }, + { + "type": "file", + "output_variable_name": "attachment", + "allowed_file_types": ["document"], + "allowed_file_upload_methods": ["remote_url"], + }, + { + "type": "file_list", + "output_variable_name": "attachments", + "allowed_file_types": ["document"], + "allowed_file_upload_methods": ["remote_url"], + "number_limits": 3, + }, + ], + submitted_data=submitted_data, + ) + _create_human_input_content( + db_session_with_containers, + workflow_run_id=workflow_run_id, + message_id=msg.id, + form_id=form.id, + ) + db_session_with_containers.commit() + + result = repository.get_by_message_ids([msg.id]) + + content = result[0][0] + assert content.form_submission_data is not None + assert content.form_submission_data.submitted_data == submitted_data + assert content.form_submission_data.rendered_content == ( + "Decision: approve\nAttachment: [file]\nAttachments: [2 files]" + ) + def test_returns_unsubmitted_form_definition( self, db_session_with_containers: Session, diff --git a/api/tests/test_containers_integration_tests/services/auth/test_api_key_auth_service.py b/api/tests/test_containers_integration_tests/services/auth/test_api_key_auth_service.py index 177fb95ff3..e71079829f 100644 --- a/api/tests/test_containers_integration_tests/services/auth/test_api_key_auth_service.py +++ b/api/tests/test_containers_integration_tests/services/auth/test_api_key_auth_service.py @@ -5,6 +5,7 @@ from unittest.mock import Mock, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from models.source import DataSourceApiKeyAuthBinding from services.auth.api_key_auth_service import ApiKeyAuthService @@ -31,7 +32,7 @@ class TestApiKeyAuthService: def 
mock_args(self, category, provider, mock_credentials) -> dict: return {"category": category, "provider": provider, "credentials": mock_credentials} - def _create_binding(self, db_session, *, tenant_id, category, provider, credentials=None, disabled=False): + def _create_binding(self, db_session: Session, *, tenant_id, category, provider, credentials=None, disabled=False): binding = DataSourceApiKeyAuthBinding( tenant_id=tenant_id, category=category, @@ -44,7 +45,7 @@ class TestApiKeyAuthService: return binding def test_get_provider_auth_list_success( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): self._create_binding(db_session_with_containers, tenant_id=tenant_id, category=category, provider=provider) db_session_with_containers.expire_all() @@ -56,14 +57,16 @@ class TestApiKeyAuthService: assert len(tenant_results) == 1 assert tenant_results[0].provider == provider - def test_get_provider_auth_list_empty(self, flask_app_with_containers, db_session_with_containers, tenant_id): + def test_get_provider_auth_list_empty( + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id + ): result = ApiKeyAuthService.get_provider_auth_list(tenant_id) tenant_results = [r for r in result if r.tenant_id == tenant_id] assert tenant_results == [] def test_get_provider_auth_list_filters_disabled( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): self._create_binding( db_session_with_containers, tenant_id=tenant_id, category=category, provider=provider, disabled=True @@ -78,7 +81,13 @@ class TestApiKeyAuthService: @patch("services.auth.api_key_auth_service.ApiKeyAuthFactory") @patch("services.auth.api_key_auth_service.encrypter") def 
test_create_provider_auth_success( - self, mock_encrypter, mock_factory, flask_app_with_containers, db_session_with_containers, tenant_id, mock_args + self, + mock_encrypter, + mock_factory, + flask_app_with_containers, + db_session_with_containers: Session, + tenant_id, + mock_args, ): mock_auth_instance = Mock() mock_auth_instance.validate_credentials.return_value = True @@ -97,7 +106,7 @@ class TestApiKeyAuthService: @patch("services.auth.api_key_auth_service.ApiKeyAuthFactory") def test_create_provider_auth_validation_failed( - self, mock_factory, flask_app_with_containers, db_session_with_containers, tenant_id, mock_args + self, mock_factory, flask_app_with_containers, db_session_with_containers: Session, tenant_id, mock_args ): mock_auth_instance = Mock() mock_auth_instance.validate_credentials.return_value = False @@ -112,7 +121,13 @@ class TestApiKeyAuthService: @patch("services.auth.api_key_auth_service.ApiKeyAuthFactory") @patch("services.auth.api_key_auth_service.encrypter") def test_create_provider_auth_encrypts_api_key( - self, mock_encrypter, mock_factory, flask_app_with_containers, db_session_with_containers, tenant_id, mock_args + self, + mock_encrypter, + mock_factory, + flask_app_with_containers, + db_session_with_containers: Session, + tenant_id, + mock_args, ): mock_auth_instance = Mock() mock_auth_instance.validate_credentials.return_value = True @@ -128,7 +143,13 @@ class TestApiKeyAuthService: mock_encrypter.encrypt_token.assert_called_once_with(tenant_id, original_key) def test_get_auth_credentials_success( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider, mock_credentials + self, + flask_app_with_containers, + db_session_with_containers: Session, + tenant_id, + category, + provider, + mock_credentials, ): self._create_binding( db_session_with_containers, @@ -144,14 +165,14 @@ class TestApiKeyAuthService: assert result == mock_credentials def test_get_auth_credentials_not_found( - self, 
flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): result = ApiKeyAuthService.get_auth_credentials(tenant_id, category, provider) assert result is None def test_get_auth_credentials_json_parsing( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): special_credentials = {"auth_type": "api_key", "config": {"api_key": "key_with_中文_and_special_chars_!@#$%"}} self._create_binding( @@ -169,7 +190,7 @@ class TestApiKeyAuthService: assert result["config"]["api_key"] == "key_with_中文_and_special_chars_!@#$%" def test_delete_provider_auth_success( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): binding = self._create_binding( db_session_with_containers, tenant_id=tenant_id, category=category, provider=provider @@ -183,7 +204,9 @@ class TestApiKeyAuthService: remaining = db_session_with_containers.query(DataSourceApiKeyAuthBinding).filter_by(id=binding_id).first() assert remaining is None - def test_delete_provider_auth_not_found(self, flask_app_with_containers, db_session_with_containers, tenant_id): + def test_delete_provider_auth_not_found( + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id + ): # Should not raise when binding not found ApiKeyAuthService.delete_provider_auth(tenant_id, str(uuid4())) diff --git a/api/tests/test_containers_integration_tests/services/auth/test_auth_integration.py b/api/tests/test_containers_integration_tests/services/auth/test_auth_integration.py index f48c6da690..e78fa27976 100644 --- a/api/tests/test_containers_integration_tests/services/auth/test_auth_integration.py +++ 
b/api/tests/test_containers_integration_tests/services/auth/test_auth_integration.py @@ -10,6 +10,7 @@ from uuid import uuid4 import httpx import pytest +from sqlalchemy.orm import Session from models.source import DataSourceApiKeyAuthBinding from services.auth.api_key_auth_factory import ApiKeyAuthFactory @@ -114,7 +115,7 @@ class TestAuthIntegration: assert result2[0].tenant_id == tenant_id_2 def test_cross_tenant_access_prevention( - self, flask_app_with_containers, db_session_with_containers, tenant_id_2, category + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id_2, category ): result = ApiKeyAuthService.get_auth_credentials(tenant_id_2, category, AuthType.FIRECRAWL) diff --git a/api/tests/test_containers_integration_tests/services/document_service_status.py b/api/tests/test_containers_integration_tests/services/document_service_status.py index 42d587b7f7..327f14ddfe 100644 --- a/api/tests/test_containers_integration_tests/services/document_service_status.py +++ b/api/tests/test_containers_integration_tests/services/document_service_status.py @@ -12,6 +12,7 @@ from unittest.mock import create_autospec, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType from extensions.storage.storage_type import StorageType @@ -273,7 +274,9 @@ class TestDocumentServicePauseDocument: "user_id": user_id, } - def test_pause_document_waiting_state_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_pause_document_waiting_state_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful pause of document in waiting state. 
@@ -310,7 +313,7 @@ class TestDocumentServicePauseDocument: mock_document_service_dependencies["redis_client"].setnx.assert_called_once_with(expected_cache_key, "True") def test_pause_document_indexing_state_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful pause of document in indexing state. @@ -340,7 +343,9 @@ class TestDocumentServicePauseDocument: assert document.is_paused is True assert document.paused_by == mock_document_service_dependencies["user_id"] - def test_pause_document_parsing_state_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_pause_document_parsing_state_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful pause of document in parsing state. @@ -367,7 +372,9 @@ class TestDocumentServicePauseDocument: db_session_with_containers.refresh(document) assert document.is_paused is True - def test_pause_document_completed_state_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_pause_document_completed_state_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when trying to pause completed document. @@ -396,7 +403,9 @@ class TestDocumentServicePauseDocument: db_session_with_containers.refresh(document) assert document.is_paused is False - def test_pause_document_error_state_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_pause_document_error_state_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when trying to pause document in error state. 
@@ -467,7 +476,9 @@ class TestDocumentServiceRecoverDocument: "recover_task": mock_task, } - def test_recover_document_paused_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_recover_document_paused_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful recovery of paused document. @@ -510,7 +521,9 @@ class TestDocumentServiceRecoverDocument: document.dataset_id, document.id ) - def test_recover_document_not_paused_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_recover_document_not_paused_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when trying to recover non-paused document. @@ -590,7 +603,9 @@ class TestDocumentServiceRetryDocument: "user_id": user_id, } - def test_retry_document_single_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_retry_document_single_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful retry of single document. @@ -629,7 +644,9 @@ class TestDocumentServiceRetryDocument: dataset.id, [document.id], mock_document_service_dependencies["user_id"] ) - def test_retry_document_multiple_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_retry_document_multiple_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful retry of multiple documents. @@ -675,7 +692,7 @@ class TestDocumentServiceRetryDocument: ) def test_retry_document_concurrent_retry_error( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test error when document is already being retried. 
@@ -708,7 +725,7 @@ class TestDocumentServiceRetryDocument: assert document.indexing_status == IndexingStatus.ERROR def test_retry_document_missing_current_user_error( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test error when current_user is missing. @@ -794,7 +811,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: } def test_batch_update_document_status_enable_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful batch enabling of documents. @@ -844,7 +861,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: assert mock_document_service_dependencies["add_task"].delay.call_count == 2 def test_batch_update_document_status_disable_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful batch disabling of documents. @@ -886,7 +903,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: mock_document_service_dependencies["remove_task"].delay.assert_called_once_with(document.id) def test_batch_update_document_status_archive_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful batch archiving of documents. @@ -928,7 +945,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: mock_document_service_dependencies["remove_task"].delay.assert_called_once_with(document.id) def test_batch_update_document_status_unarchive_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful batch unarchiving of documents. 
@@ -970,7 +987,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: mock_document_service_dependencies["add_task"].delay.assert_called_once_with(document.id) def test_batch_update_document_status_empty_list( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test handling of empty document list. @@ -996,7 +1013,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: mock_document_service_dependencies["remove_task"].delay.assert_not_called() def test_batch_update_document_status_document_indexing_error( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test error when document is being indexed. @@ -1073,7 +1090,7 @@ class TestDocumentServiceRenameDocument: "current_user": mock_current_user, } - def test_rename_document_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_success(self, db_session_with_containers: Session, mock_document_service_dependencies): """ Test successful document renaming. @@ -1111,7 +1128,9 @@ class TestDocumentServiceRenameDocument: assert result == document assert document.name == new_name - def test_rename_document_with_built_in_fields(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_with_built_in_fields( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test document renaming with built-in fields enabled. 
@@ -1154,7 +1173,9 @@ class TestDocumentServiceRenameDocument: assert document.doc_metadata["document_name"] == new_name assert document.doc_metadata["existing_key"] == "existing_value" - def test_rename_document_with_upload_file(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_with_upload_file( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test document renaming with associated upload file. @@ -1202,7 +1223,7 @@ class TestDocumentServiceRenameDocument: assert upload_file.name == new_name def test_rename_document_dataset_not_found_error( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test error when dataset is not found. @@ -1224,7 +1245,9 @@ class TestDocumentServiceRenameDocument: with pytest.raises(ValueError, match="Dataset not found"): DocumentService.rename_document(dataset_id, document_id, new_name) - def test_rename_document_not_found_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_not_found_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when document is not found. @@ -1251,7 +1274,9 @@ class TestDocumentServiceRenameDocument: with pytest.raises(ValueError, match="Document not found"): DocumentService.rename_document(dataset.id, document_id, new_name) - def test_rename_document_permission_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_permission_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when user lacks permission. 
diff --git a/api/tests/test_containers_integration_tests/services/enterprise/test_account_deletion_sync.py b/api/tests/test_containers_integration_tests/services/enterprise/test_account_deletion_sync.py index 4e8255d8ed..e73c2afe7f 100644 --- a/api/tests/test_containers_integration_tests/services/enterprise/test_account_deletion_sync.py +++ b/api/tests/test_containers_integration_tests/services/enterprise/test_account_deletion_sync.py @@ -11,6 +11,7 @@ from uuid import uuid4 import pytest from redis import RedisError +from sqlalchemy.orm import Session from extensions.ext_redis import redis_client from models.account import TenantAccountJoin @@ -122,7 +123,7 @@ class TestSyncAccountDeletion: mock_queue_task.assert_not_called() def test_sync_account_deletion_multiple_workspaces( - self, flask_app_with_containers, db_session_with_containers, mock_queue_task + self, flask_app_with_containers, db_session_with_containers: Session, mock_queue_task ): account_id = str(uuid4()) tenant_ids = [str(uuid4()) for _ in range(3)] @@ -144,7 +145,7 @@ class TestSyncAccountDeletion: assert queued_workspace_ids == set(tenant_ids) def test_sync_account_deletion_no_workspaces( - self, flask_app_with_containers, db_session_with_containers, mock_queue_task + self, flask_app_with_containers, db_session_with_containers: Session, mock_queue_task ): with patch("services.enterprise.account_deletion_sync.dify_config") as mock_config: mock_config.ENTERPRISE_ENABLED = True @@ -155,7 +156,7 @@ class TestSyncAccountDeletion: mock_queue_task.assert_not_called() def test_sync_account_deletion_partial_failure( - self, flask_app_with_containers, db_session_with_containers, mock_queue_task + self, flask_app_with_containers, db_session_with_containers: Session, mock_queue_task ): account_id = str(uuid4()) tenant_ids = [str(uuid4()) for _ in range(3)] @@ -180,7 +181,7 @@ class TestSyncAccountDeletion: assert mock_queue_task.call_count == 3 def test_sync_account_deletion_all_failures( - self, 
flask_app_with_containers, db_session_with_containers, mock_queue_task + self, flask_app_with_containers, db_session_with_containers: Session, mock_queue_task ): account_id = str(uuid4()) tenant_id = str(uuid4()) diff --git a/api/tests/test_containers_integration_tests/services/plugin/test_plugin_permission_service.py b/api/tests/test_containers_integration_tests/services/plugin/test_plugin_permission_service.py new file mode 100644 index 0000000000..49d06986fd --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/plugin/test_plugin_permission_service.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +from uuid import uuid4 + +from sqlalchemy import func, select +from sqlalchemy.orm import Session + +from models.account import TenantPluginPermission +from services.plugin.plugin_permission_service import PluginPermissionService + + +def _tenant_id() -> str: + return str(uuid4()) + + +def _get_permission(session: Session, tenant_id: str) -> TenantPluginPermission | None: + session.expire_all() + stmt = select(TenantPluginPermission).where(TenantPluginPermission.tenant_id == tenant_id) + return session.scalars(stmt).one_or_none() + + +def _count_permissions(session: Session, tenant_id: str) -> int: + stmt = select(func.count()).select_from(TenantPluginPermission).where(TenantPluginPermission.tenant_id == tenant_id) + return session.scalar(stmt) or 0 + + +class TestGetPermission: + """Integration tests for PluginPermissionService.get_permission using testcontainers.""" + + def test_returns_permission_when_found(self, db_session_with_containers: Session): + tenant_id = _tenant_id() + permission = TenantPluginPermission( + tenant_id=tenant_id, + install_permission=TenantPluginPermission.InstallPermission.ADMINS, + debug_permission=TenantPluginPermission.DebugPermission.EVERYONE, + ) + db_session_with_containers.add(permission) + db_session_with_containers.commit() + + result = PluginPermissionService.get_permission(tenant_id) + + assert result 
is not None + assert result.id == permission.id + assert result.tenant_id == tenant_id + assert result.install_permission == TenantPluginPermission.InstallPermission.ADMINS + assert result.debug_permission == TenantPluginPermission.DebugPermission.EVERYONE + + def test_returns_none_when_not_found(self, db_session_with_containers: Session): + result = PluginPermissionService.get_permission(_tenant_id()) + + assert result is None + + +class TestChangePermission: + """Integration tests for PluginPermissionService.change_permission using testcontainers.""" + + def test_creates_new_permission_when_not_exists(self, db_session_with_containers: Session): + tenant_id = _tenant_id() + + result = PluginPermissionService.change_permission( + tenant_id, + TenantPluginPermission.InstallPermission.EVERYONE, + TenantPluginPermission.DebugPermission.EVERYONE, + ) + + permission = _get_permission(db_session_with_containers, tenant_id) + assert result is True + assert permission is not None + assert permission.install_permission == TenantPluginPermission.InstallPermission.EVERYONE + assert permission.debug_permission == TenantPluginPermission.DebugPermission.EVERYONE + + def test_updates_existing_permission(self, db_session_with_containers: Session): + tenant_id = _tenant_id() + existing = TenantPluginPermission( + tenant_id=tenant_id, + install_permission=TenantPluginPermission.InstallPermission.EVERYONE, + debug_permission=TenantPluginPermission.DebugPermission.EVERYONE, + ) + db_session_with_containers.add(existing) + db_session_with_containers.commit() + + result = PluginPermissionService.change_permission( + tenant_id, + TenantPluginPermission.InstallPermission.ADMINS, + TenantPluginPermission.DebugPermission.ADMINS, + ) + + permission = _get_permission(db_session_with_containers, tenant_id) + assert result is True + assert permission is not None + assert permission.id == existing.id + assert permission.install_permission == TenantPluginPermission.InstallPermission.ADMINS + 
assert permission.debug_permission == TenantPluginPermission.DebugPermission.ADMINS + assert _count_permissions(db_session_with_containers, tenant_id) == 1 diff --git a/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py b/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py index 2b842629a7..11e864176a 100644 --- a/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py +++ b/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py @@ -3,6 +3,8 @@ from __future__ import annotations from unittest.mock import patch from uuid import uuid4 +from sqlalchemy.orm import Session + from models.model import App, RecommendedApp, Site from services.recommend_app.database.database_retrieval import DatabaseRecommendAppRetrieval from services.recommend_app.recommend_app_type import RecommendAppType @@ -45,6 +47,7 @@ def _create_recommended_app( *, app_id: str, category: str = "chat", + categories: list[str] | None = None, language: str = "en-US", is_listed: bool = True, position: int = 1, @@ -55,6 +58,7 @@ def _create_recommended_app( copyright="copy", privacy_policy="pp", category=category, + categories=[category] if categories is None else categories, language=language, is_listed=is_listed, position=position, @@ -91,7 +95,7 @@ class TestDatabaseRecommendAppRetrieval: class TestFetchRecommendedAppsFromDb: - def test_returns_apps_and_sorted_categories(self, flask_app_with_containers, db_session_with_containers): + def test_returns_apps_and_sorted_categories(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id) _create_site(db_session_with_containers, app_id=app1.id) @@ -111,7 +115,56 @@ class TestFetchRecommendedAppsFromDb: assert "assistant" in result["categories"] assert "writing" in result["categories"] - def 
test_falls_back_to_default_language_when_empty(self, flask_app_with_containers, db_session_with_containers): + def test_returns_multiple_categories_for_one_app( + self, flask_app_with_containers, db_session_with_containers: Session + ): + tenant_id = str(uuid4()) + created_app = _create_app(db_session_with_containers, tenant_id=tenant_id) + _create_site(db_session_with_containers, app_id=created_app.id) + _create_recommended_app( + db_session_with_containers, + app_id=created_app.id, + category="writing", + categories=["writing", "assistant"], + ) + + db_session_with_containers.expire_all() + + result = DatabaseRecommendAppRetrieval.fetch_recommended_apps_from_db("en-US") + + recommended_app = next(item for item in result["recommended_apps"] if item["app_id"] == created_app.id) + assert recommended_app["categories"] == ["writing", "assistant"] + assert "writing" in result["categories"] + assert "assistant" in result["categories"] + + def test_ignores_legacy_category_when_categories_are_empty( + self, + flask_app_with_containers, + db_session_with_containers: Session, + ): + legacy_category = f"legacy-empty-{uuid4()}" + tenant_id = str(uuid4()) + created_app = _create_app(db_session_with_containers, tenant_id=tenant_id) + _create_site(db_session_with_containers, app_id=created_app.id) + _create_recommended_app( + db_session_with_containers, + app_id=created_app.id, + category=legacy_category, + categories=[], + ) + + db_session_with_containers.expire_all() + + result = DatabaseRecommendAppRetrieval.fetch_recommended_apps_from_db("en-US") + + recommended_app = next(item for item in result["recommended_apps"] if item["app_id"] == created_app.id) + assert "category" not in recommended_app + assert recommended_app["categories"] == [] + assert legacy_category not in result["categories"] + + def test_falls_back_to_default_language_when_empty( + self, flask_app_with_containers, db_session_with_containers: Session + ): tenant_id = str(uuid4()) app1 = 
_create_app(db_session_with_containers, tenant_id=tenant_id) _create_site(db_session_with_containers, app_id=app1.id) @@ -124,7 +177,7 @@ class TestFetchRecommendedAppsFromDb: app_ids = {r["app_id"] for r in result["recommended_apps"]} assert app1.id in app_ids - def test_skips_non_public_apps(self, flask_app_with_containers, db_session_with_containers): + def test_skips_non_public_apps(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id, is_public=False) _create_site(db_session_with_containers, app_id=app1.id) @@ -137,7 +190,7 @@ class TestFetchRecommendedAppsFromDb: app_ids = {r["app_id"] for r in result["recommended_apps"]} assert app1.id not in app_ids - def test_skips_apps_without_site(self, flask_app_with_containers, db_session_with_containers): + def test_skips_apps_without_site(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id) _create_recommended_app(db_session_with_containers, app_id=app1.id) @@ -151,12 +204,12 @@ class TestFetchRecommendedAppsFromDb: class TestFetchRecommendedAppDetailFromDb: - def test_returns_none_when_not_listed(self, flask_app_with_containers, db_session_with_containers): + def test_returns_none_when_not_listed(self, flask_app_with_containers, db_session_with_containers: Session): result = DatabaseRecommendAppRetrieval.fetch_recommended_app_detail_from_db(str(uuid4())) assert result is None - def test_returns_none_when_app_not_public(self, flask_app_with_containers, db_session_with_containers): + def test_returns_none_when_app_not_public(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id, is_public=False) _create_recommended_app(db_session_with_containers, app_id=app1.id) @@ -168,7 +221,7 @@ class 
TestFetchRecommendedAppDetailFromDb: assert result is None @patch("services.recommend_app.database.database_retrieval.AppDslService") - def test_returns_detail_on_success(self, mock_dsl, flask_app_with_containers, db_session_with_containers): + def test_returns_detail_on_success(self, mock_dsl, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id) _create_site(db_session_with_containers, app_id=app1.id) diff --git a/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py b/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py index 3ec265d009..f78037e503 100644 --- a/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py +++ b/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py @@ -2,6 +2,7 @@ import copy import pytest from faker import Faker +from sqlalchemy.orm import Session from core.prompt.prompt_templates.advanced_prompt_templates import ( BAICHUAN_CHAT_APP_CHAT_PROMPT_CONFIG, @@ -29,7 +30,9 @@ class TestAdvancedPromptTemplateService: # for consistency with other test files return {} - def test_get_prompt_baichuan_model_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_prompt_baichuan_model_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful prompt generation for Baichuan model. 
@@ -64,7 +67,9 @@ class TestAdvancedPromptTemplateService: assert "{{#histories#}}" in prompt_text assert "{{#query#}}" in prompt_text - def test_get_prompt_common_model_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_prompt_common_model_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful prompt generation for common models. @@ -100,7 +105,7 @@ class TestAdvancedPromptTemplateService: assert "{{#query#}}" in prompt_text def test_get_prompt_case_insensitive_baichuan_detection( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan model detection is case insensitive. @@ -131,7 +136,7 @@ class TestAdvancedPromptTemplateService: assert BAICHUAN_CONTEXT in prompt_text def test_get_common_prompt_chat_app_completion_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation for chat app with completion mode. @@ -161,7 +166,9 @@ class TestAdvancedPromptTemplateService: assert "{{#histories#}}" in prompt_text assert "{{#query#}}" in prompt_text - def test_get_common_prompt_chat_app_chat_mode(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_common_prompt_chat_app_chat_mode( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test common prompt generation for chat app with chat mode. 
@@ -189,7 +196,7 @@ class TestAdvancedPromptTemplateService: assert "{{#pre_prompt#}}" in prompt_text def test_get_common_prompt_completion_app_completion_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation for completion app with completion mode. @@ -217,7 +224,7 @@ class TestAdvancedPromptTemplateService: assert "{{#pre_prompt#}}" in prompt_text def test_get_common_prompt_completion_app_chat_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation for completion app with chat mode. @@ -245,7 +252,9 @@ class TestAdvancedPromptTemplateService: assert CONTEXT in prompt_text assert "{{#pre_prompt#}}" in prompt_text - def test_get_common_prompt_no_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_common_prompt_no_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test common prompt generation without context. @@ -273,7 +282,7 @@ class TestAdvancedPromptTemplateService: assert "{{#query#}}" in prompt_text def test_get_common_prompt_unsupported_app_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation with unsupported app mode. @@ -291,7 +300,7 @@ class TestAdvancedPromptTemplateService: assert result == {} def test_get_common_prompt_unsupported_model_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation with unsupported model mode. 
@@ -308,7 +317,9 @@ class TestAdvancedPromptTemplateService: # Assert: Verify empty dict is returned assert result == {} - def test_get_completion_prompt_with_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_completion_prompt_with_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test completion prompt generation with context. @@ -339,7 +350,7 @@ class TestAdvancedPromptTemplateService: assert result_text == CONTEXT + original_text def test_get_completion_prompt_without_context( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test completion prompt generation without context. @@ -368,7 +379,9 @@ class TestAdvancedPromptTemplateService: assert result_text == original_text assert CONTEXT not in result_text - def test_get_chat_prompt_with_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_chat_prompt_with_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test chat prompt generation with context. @@ -399,7 +412,9 @@ class TestAdvancedPromptTemplateService: assert original_text in result_text assert result_text == CONTEXT + original_text - def test_get_chat_prompt_without_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_chat_prompt_without_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test chat prompt generation without context. 
@@ -429,7 +444,7 @@ class TestAdvancedPromptTemplateService: assert CONTEXT not in result_text def test_get_baichuan_prompt_chat_app_completion_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation for chat app with completion mode. @@ -460,7 +475,7 @@ class TestAdvancedPromptTemplateService: assert "{{#query#}}" in prompt_text def test_get_baichuan_prompt_chat_app_chat_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation for chat app with chat mode. @@ -489,7 +504,7 @@ class TestAdvancedPromptTemplateService: assert "{{#pre_prompt#}}" in prompt_text def test_get_baichuan_prompt_completion_app_completion_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation for completion app with completion mode. @@ -517,7 +532,7 @@ class TestAdvancedPromptTemplateService: assert "{{#pre_prompt#}}" in prompt_text def test_get_baichuan_prompt_completion_app_chat_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation for completion app with chat mode. @@ -545,7 +560,9 @@ class TestAdvancedPromptTemplateService: assert BAICHUAN_CONTEXT in prompt_text assert "{{#pre_prompt#}}" in prompt_text - def test_get_baichuan_prompt_no_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_baichuan_prompt_no_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test Baichuan prompt generation without context. 
@@ -573,7 +590,7 @@ class TestAdvancedPromptTemplateService: assert "{{#query#}}" in prompt_text def test_get_baichuan_prompt_unsupported_app_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation with unsupported app mode. @@ -591,7 +608,7 @@ class TestAdvancedPromptTemplateService: assert result == {} def test_get_baichuan_prompt_unsupported_model_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation with unsupported model mode. @@ -609,7 +626,7 @@ class TestAdvancedPromptTemplateService: assert result == {} def test_get_prompt_all_app_modes_common_model( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test prompt generation for all app modes with common model. @@ -641,7 +658,7 @@ class TestAdvancedPromptTemplateService: assert result != {} def test_get_prompt_all_app_modes_baichuan_model( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test prompt generation for all app modes with Baichuan model. @@ -672,7 +689,7 @@ class TestAdvancedPromptTemplateService: assert result is not None assert result != {} - def test_get_prompt_edge_cases(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_prompt_edge_cases(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test prompt generation with edge cases. 
@@ -704,7 +721,7 @@ class TestAdvancedPromptTemplateService: # Should either return a valid result or empty dict, but not crash assert result is not None - def test_template_immutability(self, db_session_with_containers, mock_external_service_dependencies): + def test_template_immutability(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test that original templates are not modified. @@ -738,7 +755,9 @@ class TestAdvancedPromptTemplateService: assert original_completion_completion == COMPLETION_APP_COMPLETION_PROMPT_CONFIG assert original_completion_chat == COMPLETION_APP_CHAT_PROMPT_CONFIG - def test_baichuan_template_immutability(self, db_session_with_containers, mock_external_service_dependencies): + def test_baichuan_template_immutability( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test that original Baichuan templates are not modified. @@ -772,7 +791,9 @@ class TestAdvancedPromptTemplateService: assert original_baichuan_completion_completion == BAICHUAN_COMPLETION_APP_COMPLETION_PROMPT_CONFIG assert original_baichuan_completion_chat == BAICHUAN_COMPLETION_APP_CHAT_PROMPT_CONFIG - def test_context_integration_consistency(self, db_session_with_containers, mock_external_service_dependencies): + def test_context_integration_consistency( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test consistency of context integration across different scenarios. @@ -828,7 +849,7 @@ class TestAdvancedPromptTemplateService: assert prompt_text.startswith(CONTEXT) def test_baichuan_context_integration_consistency( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test consistency of Baichuan context integration across different scenarios. 
diff --git a/api/tests/test_containers_integration_tests/services/test_agent_service.py b/api/tests/test_containers_integration_tests/services/test_agent_service.py index 00a2f9a59f..cbd939c7a4 100644 --- a/api/tests/test_containers_integration_tests/services/test_agent_service.py +++ b/api/tests/test_containers_integration_tests/services/test_agent_service.py @@ -6,7 +6,7 @@ from faker import Faker from sqlalchemy.orm import Session from core.plugin.impl.exc import PluginDaemonClientSideError -from models import Account +from models import Account, CreatorUserRole from models.enums import ConversationFromSource, MessageFileBelongsTo from models.model import AppModelConfig, Conversation, EndUser, Message, MessageAgentThought from services.account_service import AccountService, TenantService @@ -246,7 +246,7 @@ class TestAgentService: tool_input=json.dumps({"test_tool": {"input": "test_input"}}), observation=json.dumps({"test_tool": {"output": "test_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought1) @@ -294,7 +294,7 @@ class TestAgentService: agent_thoughts = self._create_test_agent_thoughts(db_session_with_containers, message) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result structure assert result is not None @@ -370,7 +370,7 @@ class TestAgentService: # Execute the method under test with non-existent message with pytest.raises(ValueError, match="Message not found"): - AgentService.get_agent_logs(app, str(conversation.id), fake.uuid4()) + AgentService.get_agent_logs(app, conversation.id, fake.uuid4()) def test_get_agent_logs_with_end_user( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -451,7 +451,7 @@ class TestAgentService: 
db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -523,7 +523,7 @@ class TestAgentService: db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -561,14 +561,14 @@ class TestAgentService: tool_input=json.dumps({"error_tool": {"input": "test_input"}}), observation=json.dumps({"error_tool": {"output": "error_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_error) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -592,7 +592,7 @@ class TestAgentService: conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -654,7 +654,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="App model config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def test_get_agent_logs_agent_config_not_found( self, db_session_with_containers: Session, mock_external_service_dependencies 
@@ -673,7 +673,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="Agent config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def test_list_agent_providers_success( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -687,7 +687,7 @@ class TestAgentService: app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) # Execute the method under test - result = AgentService.list_agent_providers(str(account.id), str(app.tenant_id)) + result = AgentService.list_agent_providers(account.id, app.tenant_id) # Verify the result assert result is not None @@ -696,7 +696,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(str(app.tenant_id)) + mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(app.tenant_id) def test_get_agent_provider_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ @@ -710,7 +710,7 @@ class TestAgentService: provider_name = "test_provider" # Execute the method under test - result = AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + result = AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) # Verify the result assert result is not None @@ -718,7 +718,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(str(app.tenant_id), provider_name) + mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(app.tenant_id, provider_name) def 
test_get_agent_provider_plugin_error( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -740,7 +740,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match=error_message): - AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) def test_get_agent_logs_with_complex_tool_data( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -796,14 +796,14 @@ class TestAgentService: {"tool1": {"output1": "result1"}, "tool2": {"output2": "result2"}, "tool3": {"output3": "result3"}} ), tokens=100, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(complex_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -891,14 +891,14 @@ class TestAgentService: observation=json.dumps({"file_tool": {"output": "test_output"}}), message_files=json.dumps(["file1", "file2"]), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_files) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -926,7 +926,7 @@ class TestAgentService: mock_external_service_dependencies["current_user"].timezone = "Asia/Shanghai" # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = 
AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -960,14 +960,14 @@ class TestAgentService: tool_input="", # Empty input observation="", # Empty observation tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(empty_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -1001,14 +1001,14 @@ class TestAgentService: tool_input="invalid json", # Malformed JSON observation="invalid json", # Malformed JSON tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(malformed_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result - should handle malformed JSON gracefully assert result is not None diff --git a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py index 1835650c42..a5ec06dc13 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py @@ -3,13 +3,15 @@ from __future__ import annotations import base64 import json from types import SimpleNamespace -from typing import Any, cast +from typing import Any from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest import yaml from faker import Faker +from flask import Flask +from sqlalchemy.orm import Session from 
core.trigger.constants import ( TRIGGER_PLUGIN_NODE_TYPE, @@ -69,6 +71,7 @@ def _pending_yaml_content(version: str = "99.0.0") -> bytes: def _app_stub(**overrides: Any) -> App: + """Create a stub App object for testing without hitting the database.""" defaults = { "id": str(uuid4()), "tenant_id": _DEFAULT_TENANT_ID, @@ -81,14 +84,17 @@ def _app_stub(**overrides: Any) -> App: "use_icon_as_answer_icon": False, "app_model_config": None, } - return cast(App, SimpleNamespace(**(defaults | overrides))) + app = MagicMock(spec=App) + for key, value in (defaults | overrides).items(): + object.__setattr__(app, key, value) + return app class TestAppDslService: """Integration tests for AppDslService using testcontainers.""" @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture @@ -129,7 +135,7 @@ class TestAppDslService: "enterprise_service": mock_enterprise_service, } - def _create_test_app_and_account(self, db_session_with_containers, mock_external_service_dependencies): + def _create_test_app_and_account(self, db_session_with_containers: Session, mock_external_service_dependencies): fake = Faker() with patch("services.account_service.FeatureService") as mock_account_feature_service: mock_account_feature_service.get_system_features.return_value.is_allow_register = True @@ -192,7 +198,7 @@ class TestAppDslService: def test_check_version_compatibility_newer_version_returns_pending(self): assert _check_version_compatibility("99.0.0") == ImportStatus.PENDING - def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch): + def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(app_dsl_service, "CURRENT_DSL_VERSION", "1.0.0") assert _check_version_compatibility("0.9.9") == ImportStatus.PENDING @@ -206,7 +212,7 @@ class TestAppDslService: # ── Import: Validation 
──────────────────────────────────────────── - def test_import_app_invalid_import_mode_raises_value_error(self, db_session_with_containers): + def test_import_app_invalid_import_mode_raises_value_error(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Invalid import_mode"): service.import_app( @@ -215,7 +221,7 @@ class TestAppDslService: yaml_content="version: '0.1.0'", ) - def test_import_app_missing_yaml_content(self, db_session_with_containers): + def test_import_app_missing_yaml_content(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -225,7 +231,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "yaml_content is required" in result.error - def test_import_app_missing_yaml_url(self, db_session_with_containers): + def test_import_app_missing_yaml_url(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -235,7 +241,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "yaml_url is required" in result.error - def test_import_app_yaml_not_mapping_returns_failed(self, db_session_with_containers): + def test_import_app_yaml_not_mapping_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -245,7 +251,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "content must be a mapping" in result.error - def test_import_app_version_not_str_returns_failed(self, db_session_with_containers): + def test_import_app_version_not_str_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) yaml_content = _yaml_dump({"version": 1, "kind": "app", "app": 
{"name": "x", "mode": "workflow"}}) result = service.import_app( @@ -256,7 +262,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Invalid version type" in result.error - def test_import_app_missing_app_data_returns_failed(self, db_session_with_containers): + def test_import_app_missing_app_data_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -266,7 +272,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Missing app data" in result.error - def test_import_app_yaml_error_returns_failed(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): def bad_safe_load(_content: str): raise yaml.YAMLError("bad") @@ -281,7 +289,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert result.error.startswith("Invalid YAML format:") - def test_import_app_unexpected_error_returns_failed(self, db_session_with_containers, monkeypatch): + def test_import_app_unexpected_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( AppDslService, "_create_or_update_app", @@ -299,7 +309,9 @@ class TestAppDslService: # ── Import: YAML URL ────────────────────────────────────────────── - def test_import_app_yaml_url_fetch_error_returns_failed(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_fetch_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( app_dsl_service.ssrf_proxy, "get", @@ -315,7 +327,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Error fetching YAML from URL: boom" in result.error - def test_import_app_yaml_url_empty_content_returns_failed(self, 
db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_empty_content_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() response.content = b"" response.raise_for_status.return_value = None @@ -330,7 +344,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Empty content" in result.error - def test_import_app_yaml_url_file_too_large_returns_failed(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_file_too_large_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() response.content = b"x" * (DSL_MAX_SIZE + 1) response.raise_for_status.return_value = None @@ -345,7 +361,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "File size exceeds" in result.error - def test_import_app_yaml_url_user_attachments_keeps_original_url(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_user_attachments_keeps_original_url( + self, db_session_with_containers: Session, monkeypatch + ): yaml_url = "https://github.com/user-attachments/files/24290802/loop-test.yml" yaml_bytes = _pending_yaml_content() @@ -371,7 +389,9 @@ class TestAppDslService: assert result.imported_dsl_version == "99.0.0" assert requested_urls == [yaml_url] - def test_import_app_yaml_url_github_blob_rewrites_to_raw(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_github_blob_rewrites_to_raw( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): yaml_url = "https://github.com/acme/repo/blob/main/app.yml" raw_url = "https://raw.githubusercontent.com/acme/repo/main/app.yml" yaml_bytes = _pending_yaml_content() @@ -400,7 +420,7 @@ class TestAppDslService: # ── Import: App ID checks ──────────────────────────────────────── - def test_import_app_app_id_not_found_returns_failed(self, 
db_session_with_containers): + def test_import_app_app_id_not_found_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -412,7 +432,7 @@ class TestAppDslService: assert result.error == "App not found" def test_import_app_overwrite_only_allows_workflow_and_advanced_chat( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) assert app.mode == "chat" @@ -429,7 +449,7 @@ class TestAppDslService: # ── Import: Flow ────────────────────────────────────────────────── - def test_import_app_pending_stores_import_info_in_redis(self, db_session_with_containers): + def test_import_app_pending_stores_import_info_in_redis(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -449,7 +469,7 @@ class TestAppDslService: assert stored is not None def test_import_app_completed_uses_declared_dependencies( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): _, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) @@ -483,7 +503,7 @@ class TestAppDslService: @pytest.mark.parametrize("has_workflow", [True, False]) def test_import_app_legacy_versions_extract_dependencies( - self, db_session_with_containers, monkeypatch, has_workflow: bool + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch, has_workflow: bool ): monkeypatch.setattr( AppDslService, @@ -540,13 +560,15 @@ class TestAppDslService: # ── Confirm Import ──────────────────────────────────────────────── - def 
test_confirm_import_expired_returns_failed(self, db_session_with_containers): + def test_confirm_import_expired_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.confirm_import(import_id=str(uuid4()), account=_account_mock()) assert result.status == ImportStatus.FAILED assert "expired" in result.error - def test_confirm_import_success_deletes_redis_key(self, db_session_with_containers, monkeypatch): + def test_confirm_import_success_deletes_redis_key( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): import_id = str(uuid4()) redis_key = f"{IMPORT_INFO_REDIS_KEY_PREFIX}{import_id}" @@ -579,7 +601,7 @@ class TestAppDslService: assert result.app_id == created_app.id assert redis_client.get(redis_key) is None - def test_confirm_import_invalid_pending_data_type_returns_failed(self, db_session_with_containers): + def test_confirm_import_invalid_pending_data_type_returns_failed(self, db_session_with_containers: Session): import_id = str(uuid4()) redis_key = f"{IMPORT_INFO_REDIS_KEY_PREFIX}{import_id}" redis_client.setex(redis_key, IMPORT_INFO_REDIS_EXPIRY, "123") @@ -589,7 +611,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "validation error" in result.error - def test_confirm_import_exception_returns_failed(self, db_session_with_containers): + def test_confirm_import_exception_returns_failed(self, db_session_with_containers: Session): import_id = str(uuid4()) redis_key = f"{IMPORT_INFO_REDIS_KEY_PREFIX}{import_id}" redis_client.setex(redis_key, IMPORT_INFO_REDIS_EXPIRY, "not-valid-json") @@ -600,13 +622,15 @@ class TestAppDslService: # ── Check Dependencies ──────────────────────────────────────────── - def test_check_dependencies_returns_empty_when_no_redis_data(self, db_session_with_containers): + def test_check_dependencies_returns_empty_when_no_redis_data(self, db_session_with_containers: Session): service = 
AppDslService(db_session_with_containers) app_model = _app_stub() result = service.check_dependencies(app_model=app_model) assert result.leaked_dependencies == [] - def test_check_dependencies_calls_analysis_service(self, db_session_with_containers, monkeypatch): + def test_check_dependencies_calls_analysis_service( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): app_id = str(uuid4()) pending = CheckDependenciesPendingData(dependencies=[], app_id=app_id) redis_client.setex( @@ -634,7 +658,9 @@ class TestAppDslService: result = service.check_dependencies(app_model=_app_stub(id=app_id)) assert len(result.leaked_dependencies) == 1 - def test_check_dependencies_with_real_app(self, db_session_with_containers, mock_external_service_dependencies): + def test_check_dependencies_with_real_app( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) mock_dependencies_json = '{"app_id": "' + app.id + '", "dependencies": []}' @@ -650,12 +676,14 @@ class TestAppDslService: # ── Create/Update App ───────────────────────────────────────────── - def test_create_or_update_app_missing_mode_raises(self, db_session_with_containers): + def test_create_or_update_app_missing_mode_raises(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="loss app mode"): service._create_or_update_app(app=None, data={"app": {}}, account=_account_mock()) - def test_create_or_update_app_existing_app_updates_fields(self, db_session_with_containers, monkeypatch): + def test_create_or_update_app_existing_app_updates_fields( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): fixed_now = object() monkeypatch.setattr(app_dsl_service, "naive_utc_now", lambda: fixed_now) @@ -707,7 +735,7 @@ class TestAppDslService: assert 
app.icon_background == "#222222" assert app.updated_at is fixed_now - def test_create_or_update_app_new_app_requires_tenant(self, db_session_with_containers): + def test_create_or_update_app_new_app_requires_tenant(self, db_session_with_containers: Session): account = _account_mock() account.current_tenant_id = None service = AppDslService(db_session_with_containers) @@ -719,7 +747,7 @@ class TestAppDslService: ) def test_create_or_update_app_creates_workflow_app_and_saves_dependencies( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): _, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) @@ -755,7 +783,7 @@ class TestAppDslService: stored = redis_client.get(f"{CHECK_DEPENDENCIES_REDIS_KEY_PREFIX}{app.id}") assert stored is not None - def test_create_or_update_app_workflow_missing_workflow_data_raises(self, db_session_with_containers): + def test_create_or_update_app_workflow_missing_workflow_data_raises(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Missing workflow data"): service._create_or_update_app( @@ -764,17 +792,17 @@ class TestAppDslService: account=_account_mock(), ) - def test_create_or_update_app_chat_requires_model_config(self, db_session_with_containers): + def test_create_or_update_app_chat_requires_model_config(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Missing model_config"): service._create_or_update_app( - app=_app_stub(mode=AppMode.CHAT.value), - data={"app": {"mode": AppMode.CHAT.value}}, + app=_app_stub(mode=AppMode.CHAT), + data={"app": {"mode": AppMode.CHAT}}, account=_account_mock(), ) def test_create_or_update_app_chat_creates_model_config_and_sends_event( - self, db_session_with_containers, 
mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) app.app_model_config_id = None @@ -784,7 +812,7 @@ class TestAppDslService: service._create_or_update_app( app=app, data={ - "app": {"mode": AppMode.CHAT.value}, + "app": {"mode": AppMode.CHAT}, "model_config": {"model": {"provider": "openai"}}, }, account=account, @@ -793,18 +821,18 @@ class TestAppDslService: db_session_with_containers.expire_all() assert app.app_model_config_id is not None - def test_create_or_update_app_invalid_mode_raises(self, db_session_with_containers): + def test_create_or_update_app_invalid_mode_raises(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Invalid app mode"): service._create_or_update_app( - app=_app_stub(mode=AppMode.RAG_PIPELINE.value), - data={"app": {"mode": AppMode.RAG_PIPELINE.value}}, + app=_app_stub(mode=AppMode.RAG_PIPELINE), + data={"app": {"mode": AppMode.RAG_PIPELINE}}, account=_account_mock(), ) # ── Export ───────────────────────────────────────────────────────── - def test_export_dsl_delegates_by_mode(self, monkeypatch): + def test_export_dsl_delegates_by_mode(self, monkeypatch: pytest.MonkeyPatch): workflow_calls: list[bool] = [] model_calls: list[bool] = [] monkeypatch.setattr( @@ -826,14 +854,14 @@ class TestAppDslService: assert workflow_calls == [True] chat_app = _app_stub( - mode=AppMode.CHAT.value, + mode=AppMode.CHAT, icon_type="emoji", app_model_config=SimpleNamespace(to_dict=lambda: {"agent_mode": {"tools": []}}), ) AppDslService.export_dsl(chat_app) assert model_calls == [True] - def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch): + def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, 
"_append_workflow_export_data", @@ -870,7 +898,7 @@ class TestAppDslService: assert data["app"]["icon_type"] == "image" assert data["app"]["icon_background"] == "#FFEAD5" - def test_export_dsl_chat_app_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_export_dsl_chat_app_success(self, db_session_with_containers: Session, mock_external_service_dependencies): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) model_config = AppModelConfig( @@ -908,7 +936,9 @@ class TestAppDslService: assert "model_config" in exported_data assert "dependencies" in exported_data - def test_export_dsl_workflow_app_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_export_dsl_workflow_app_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) app.mode = "workflow" db_session_with_containers.commit() @@ -941,7 +971,9 @@ class TestAppDslService: assert "workflow" in exported_data assert "dependencies" in exported_data - def test_export_dsl_with_workflow_id_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_export_dsl_with_workflow_id_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) app.mode = "workflow" db_session_with_containers.commit() @@ -981,7 +1013,7 @@ class TestAppDslService: assert "workflow" in exported_data def test_export_dsl_with_invalid_workflow_id_raises_error( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, 
mock_external_service_dependencies) app.mode = "workflow" @@ -997,7 +1029,7 @@ class TestAppDslService: # ── Workflow Export Data ─────────────────────────────────────────── - def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch): + def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch: pytest.MonkeyPatch): workflow_dict = { "graph": { "nodes": [ @@ -1097,7 +1129,7 @@ class TestAppDslService: assert nodes[5]["data"]["subscription_id"] == "" assert export_data["dependencies"] == [{"tenant": _DEFAULT_TENANT_ID, "dep": "dep-1"}] - def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch): + def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch: pytest.MonkeyPatch): workflow_service = MagicMock() workflow_service.get_draft_workflow.return_value = None monkeypatch.setattr(app_dsl_service, "WorkflowService", lambda: workflow_service) @@ -1112,7 +1144,7 @@ class TestAppDslService: # ── Model Config Export Data ────────────────────────────────────── - def test_append_model_config_export_data_filters_credential_id(self, monkeypatch): + def test_append_model_config_export_data_filters_credential_id(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, "_extract_dependencies_from_model_config", @@ -1146,7 +1178,7 @@ class TestAppDslService: # ── Dependency Extraction ───────────────────────────────────────── - def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, monkeypatch): + def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_tool_dependency", @@ -1216,7 +1248,7 @@ class TestAppDslService: "model:m4", ] - def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): 
monkeypatch.setattr( app_dsl_service.ToolNodeData, "model_validate", @@ -1227,7 +1259,7 @@ class TestAppDslService: ) assert deps == [] - def test_extract_dependencies_from_model_config_parses_providers(self, monkeypatch): + def test_extract_dependencies_from_model_config_parses_providers(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1250,7 +1282,7 @@ class TestAppDslService: ) assert deps == ["model:p1", "model:p2", "tool:t1"] - def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1264,7 +1296,7 @@ class TestAppDslService: def test_get_leaked_dependencies_empty_returns_empty(self): assert AppDslService.get_leaked_dependencies(_DEFAULT_TENANT_ID, []) == [] - def test_get_leaked_dependencies_delegates(self, monkeypatch): + def test_get_leaked_dependencies_delegates(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "get_leaked_dependencies", @@ -1275,7 +1307,7 @@ class TestAppDslService: # ── Encryption/Decryption ───────────────────────────────────────── - def test_encrypt_decrypt_dataset_id_respects_config(self, monkeypatch): + def test_encrypt_decrypt_dataset_id_respects_config(self, monkeypatch: pytest.MonkeyPatch): tenant_id = _DEFAULT_TENANT_ID dataset_uuid = "00000000-0000-0000-0000-000000000000" @@ -1300,7 +1332,7 @@ class TestAppDslService: value = "00000000-0000-0000-0000-000000000000" assert AppDslService.decrypt_dataset_id(encrypted_data=value, tenant_id=_DEFAULT_TENANT_ID) == value - def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch): + def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch: 
pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", @@ -1308,7 +1340,7 @@ class TestAppDslService: ) assert AppDslService.decrypt_dataset_id(encrypted_data="not-base64", tenant_id=_DEFAULT_TENANT_ID) is None - def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch): + def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", diff --git a/api/tests/test_containers_integration_tests/services/test_app_generate_service.py b/api/tests/test_containers_integration_tests/services/test_app_generate_service.py index 3229693fd4..e2fe6c8476 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_generate_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_generate_service.py @@ -7,6 +7,7 @@ from faker import Faker from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import App from models.model import EndUser from models.workflow import Workflow from services.app_generate_service import AppGenerateService @@ -184,7 +185,7 @@ class TestAppGenerateService: return app, account - def _create_test_workflow(self, db_session_with_containers: Session, app): + def _create_test_workflow(self, db_session_with_containers: Session, app: App): """ Helper method to create a test workflow for testing. 
diff --git a/api/tests/test_containers_integration_tests/services/test_app_service.py b/api/tests/test_containers_integration_tests/services/test_app_service.py index b695ae9fd9..837b63d1ea 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_service.py @@ -6,6 +6,7 @@ from sqlalchemy.orm import Session from constants.model_template import default_app_templates from models import Account +from models.enums import AppStatus, CustomizeTokenStrategy from models.model import App, IconType, Site from services.account_service import AccountService, TenantService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -1079,9 +1080,9 @@ class TestAppService: site.app_id = app.id site.code = fake.postalcode() site.title = fake.company() - site.status = "normal" + site.status = AppStatus.NORMAL site.default_language = "en-US" - site.customize_token_strategy = "uuid" + site.customize_token_strategy = CustomizeTokenStrategy.UUID db_session_with_containers.add(site) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/services/test_attachment_service.py b/api/tests/test_containers_integration_tests/services/test_attachment_service.py index 768a8baee2..d0c07f0de8 100644 --- a/api/tests/test_containers_integration_tests/services/test_attachment_service.py +++ b/api/tests/test_containers_integration_tests/services/test_attachment_service.py @@ -7,7 +7,7 @@ from uuid import uuid4 import pytest from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker +from sqlalchemy.orm import Session, sessionmaker from werkzeug.exceptions import NotFound import services.attachment_service as attachment_service_module @@ -19,7 +19,7 @@ from services.attachment_service import AttachmentService class TestAttachmentService: - def _create_upload_file(self, db_session_with_containers, *, tenant_id: str | None = 
None) -> UploadFile: + def _create_upload_file(self, db_session_with_containers: Session, *, tenant_id: str | None = None) -> UploadFile: upload_file = UploadFile( tenant_id=tenant_id or str(uuid4()), storage_type=StorageType.OPENDAL, @@ -60,7 +60,7 @@ class TestAttachmentService: with pytest.raises(AssertionError, match="must be a sessionmaker or an Engine."): AttachmentService(session_factory=invalid_session_factory) - def test_should_return_base64_when_file_exists(self, db_session_with_containers): + def test_should_return_base64_when_file_exists(self, db_session_with_containers: Session): upload_file = self._create_upload_file(db_session_with_containers) service = AttachmentService(session_factory=sessionmaker(bind=db.engine)) @@ -70,7 +70,7 @@ class TestAttachmentService: assert result == base64.b64encode(b"binary-content").decode() mock_load.assert_called_once_with(upload_file.key) - def test_should_raise_not_found_when_file_missing(self, db_session_with_containers): + def test_should_raise_not_found_when_file_missing(self, db_session_with_containers: Session): service = AttachmentService(session_factory=sessionmaker(bind=db.engine)) with patch.object(attachment_service_module.storage, "load_once") as mock_load: diff --git a/api/tests/test_containers_integration_tests/services/test_billing_service.py b/api/tests/test_containers_integration_tests/services/test_billing_service.py index 8092c7ad75..4893126d7f 100644 --- a/api/tests/test_containers_integration_tests/services/test_billing_service.py +++ b/api/tests/test_containers_integration_tests/services/test_billing_service.py @@ -4,6 +4,7 @@ from unittest.mock import patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from extensions.ext_redis import redis_client @@ -24,7 +25,7 @@ class TestBillingServiceGetPlanBulkWithCache: """ @pytest.fixture(autouse=True) - def setup_redis_cleanup(self, flask_app_with_containers): + def setup_redis_cleanup(self, 
flask_app_with_containers: Flask): """Clean up Redis cache before and after each test.""" with flask_app_with_containers.app_context(): # Clean up before test @@ -56,7 +57,7 @@ class TestBillingServiceGetPlanBulkWithCache: return value return None - def test_get_plan_bulk_with_cache_all_cache_hit(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_all_cache_hit(self, flask_app_with_containers: Flask): """Test bulk plan retrieval when all tenants are in cache.""" with flask_app_with_containers.app_context(): # Arrange @@ -87,7 +88,7 @@ class TestBillingServiceGetPlanBulkWithCache: # Verify API was not called mock_get_plan_bulk.assert_not_called() - def test_get_plan_bulk_with_cache_all_cache_miss(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_all_cache_miss(self, flask_app_with_containers: Flask): """Test bulk plan retrieval when all tenants are not in cache.""" with flask_app_with_containers.app_context(): # Arrange @@ -127,7 +128,7 @@ class TestBillingServiceGetPlanBulkWithCache: assert ttl_1 > 0 assert ttl_1 <= 600 # Should be <= 600 seconds - def test_get_plan_bulk_with_cache_partial_cache_hit(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_partial_cache_hit(self, flask_app_with_containers: Flask): """Test bulk plan retrieval when some tenants are in cache, some are not.""" with flask_app_with_containers.app_context(): # Arrange @@ -158,7 +159,7 @@ class TestBillingServiceGetPlanBulkWithCache: cached_data_3 = json.loads(cached_3) assert cached_data_3 == missing_plan["tenant-3"] - def test_get_plan_bulk_with_cache_redis_mget_failure(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_redis_mget_failure(self, flask_app_with_containers: Flask): """Test fallback to API when Redis mget fails.""" with flask_app_with_containers.app_context(): # Arrange @@ -189,7 +190,7 @@ class TestBillingServiceGetPlanBulkWithCache: assert cached_1 is not None assert cached_2 is not None - def 
test_get_plan_bulk_with_cache_invalid_json_in_cache(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_invalid_json_in_cache(self, flask_app_with_containers: Flask): """Test fallback to API when cache contains invalid JSON.""" with flask_app_with_containers.app_context(): # Arrange @@ -241,7 +242,7 @@ class TestBillingServiceGetPlanBulkWithCache: cached_data_3 = json.loads(cached_3) assert cached_data_3 == expected_plans["tenant-3"] - def test_get_plan_bulk_with_cache_invalid_plan_data_in_cache(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_invalid_plan_data_in_cache(self, flask_app_with_containers: Flask): """Test fallback to API when cache data doesn't match SubscriptionPlan schema.""" with flask_app_with_containers.app_context(): # Arrange @@ -274,7 +275,7 @@ class TestBillingServiceGetPlanBulkWithCache: # Verify API was called for tenant-2 and tenant-3 mock_get_plan_bulk.assert_called_once_with(["tenant-2", "tenant-3"]) - def test_get_plan_bulk_with_cache_redis_pipeline_failure(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_redis_pipeline_failure(self, flask_app_with_containers: Flask): """Test that pipeline failure doesn't affect return value.""" with flask_app_with_containers.app_context(): # Arrange @@ -303,7 +304,7 @@ class TestBillingServiceGetPlanBulkWithCache: # Verify pipeline was attempted mock_pipeline.assert_called_once() - def test_get_plan_bulk_with_cache_empty_tenant_ids(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_empty_tenant_ids(self, flask_app_with_containers: Flask): """Test with empty tenant_ids list.""" with flask_app_with_containers.app_context(): # Act @@ -321,7 +322,7 @@ class TestBillingServiceGetPlanBulkWithCache: # But we should check that mget was not called at all # Since we can't easily verify this without more mocking, we just verify the result - def test_get_plan_bulk_with_cache_ttl_expired(self, flask_app_with_containers): + def 
test_get_plan_bulk_with_cache_ttl_expired(self, flask_app_with_containers: Flask): """Test that expired cache keys are treated as cache misses.""" with flask_app_with_containers.app_context(): # Arrange diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service.py b/api/tests/test_containers_integration_tests/services/test_conversation_service.py index 98c38f2b5f..5f3914eb19 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service.py @@ -7,8 +7,10 @@ from uuid import uuid4 import pytest from sqlalchemy import select +from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.enums import ConversationFromSource from models.model import App, Conversation, EndUser, Message, MessageAnnotation @@ -21,7 +23,7 @@ from services.message_service import MessageService class ConversationServiceIntegrationTestDataFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -40,7 +42,7 @@ class ConversationServiceIntegrationTestDataFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) @@ -154,7 +156,7 @@ class ConversationServiceIntegrationTestDataFactory: total_price=Decimal(0), currency="USD", status="normal", - invoke_from=InvokeFrom.WEB_APP.value, + invoke_from=InvokeFrom.WEB_APP, from_source=ConversationFromSource.API if isinstance(user, EndUser) else ConversationFromSource.CONSOLE, from_end_user_id=user.id if isinstance(user, EndUser) 
else None, from_account_id=user.id if isinstance(user, Account) else None, @@ -170,7 +172,7 @@ class ConversationServiceIntegrationTestDataFactory: class TestConversationServicePagination: """Test conversation pagination operations.""" - def test_pagination_with_non_empty_include_ids(self, db_session_with_containers): + def test_pagination_with_non_empty_include_ids(self, db_session_with_containers: Session): """ Test that non-empty include_ids filters properly. @@ -204,7 +206,7 @@ class TestConversationServicePagination: returned_ids = {conversation.id for conversation in result.data} assert returned_ids == {conversations[0].id, conversations[1].id} - def test_pagination_with_empty_exclude_ids(self, db_session_with_containers): + def test_pagination_with_empty_exclude_ids(self, db_session_with_containers: Session): """ Test that empty exclude_ids doesn't filter. @@ -237,7 +239,7 @@ class TestConversationServicePagination: # Assert assert len(result.data) == len(conversations) - def test_pagination_with_non_empty_exclude_ids(self, db_session_with_containers): + def test_pagination_with_non_empty_exclude_ids(self, db_session_with_containers: Session): """ Test that non-empty exclude_ids filters properly. @@ -271,7 +273,7 @@ class TestConversationServicePagination: returned_ids = {conversation.id for conversation in result.data} assert returned_ids == {conversations[2].id} - def test_pagination_with_sorting_descending(self, db_session_with_containers): + def test_pagination_with_sorting_descending(self, db_session_with_containers: Session): """ Test pagination with descending sort order. @@ -316,7 +318,7 @@ class TestConversationServiceMessageCreation: within conversations. """ - def test_pagination_by_first_id_without_first_id(self, db_session_with_containers): + def test_pagination_by_first_id_without_first_id(self, db_session_with_containers: Session): """ Test message pagination without specifying first_id. 
@@ -354,7 +356,7 @@ class TestConversationServiceMessageCreation: assert len(result.data) == 3 # All 3 messages returned assert result.has_more is False # No more messages available (3 < limit of 10) - def test_pagination_by_first_id_with_first_id(self, db_session_with_containers): + def test_pagination_by_first_id_with_first_id(self, db_session_with_containers: Session): """ Test message pagination with first_id specified. @@ -399,7 +401,9 @@ class TestConversationServiceMessageCreation: assert len(result.data) == 2 # Only 2 messages returned after first_id assert result.has_more is False # No more messages available (2 < limit of 10) - def test_pagination_by_first_id_raises_error_when_first_message_not_found(self, db_session_with_containers): + def test_pagination_by_first_id_raises_error_when_first_message_not_found( + self, db_session_with_containers: Session + ): """ Test that FirstMessageNotExistsError is raised when first_id doesn't exist. @@ -424,7 +428,7 @@ class TestConversationServiceMessageCreation: limit=10, ) - def test_pagination_with_has_more_flag(self, db_session_with_containers): + def test_pagination_with_has_more_flag(self, db_session_with_containers: Session): """ Test that has_more flag is correctly set when there are more messages. @@ -463,7 +467,7 @@ class TestConversationServiceMessageCreation: assert len(result.data) == limit # Extra message should be removed assert result.has_more is True # Flag should be set - def test_pagination_with_ascending_order(self, db_session_with_containers): + def test_pagination_with_ascending_order(self, db_session_with_containers: Session): """ Test message pagination with ascending order. 
@@ -512,7 +516,7 @@ class TestConversationServiceSummarization: """ @patch("services.conversation_service.LLMGenerator.generate_conversation_name") - def test_auto_generate_name_success(self, mock_llm_generator, db_session_with_containers): + def test_auto_generate_name_success(self, mock_llm_generator, db_session_with_containers: Session): """ Test successful auto-generation of conversation name. @@ -552,7 +556,7 @@ class TestConversationServiceSummarization: app_model.tenant_id, first_message.query, conversation.id, app_model.id ) - def test_auto_generate_name_raises_error_when_no_message(self, db_session_with_containers): + def test_auto_generate_name_raises_error_when_no_message(self, db_session_with_containers: Session): """ Test that MessageNotExistsError is raised when conversation has no messages. @@ -571,7 +575,9 @@ class TestConversationServiceSummarization: ConversationService.auto_generate_name(app_model, conversation) @patch("services.conversation_service.LLMGenerator.generate_conversation_name") - def test_auto_generate_name_handles_llm_failure_gracefully(self, mock_llm_generator, db_session_with_containers): + def test_auto_generate_name_handles_llm_failure_gracefully( + self, mock_llm_generator, db_session_with_containers: Session + ): """ Test that LLM generation failures are suppressed and don't crash. @@ -604,7 +610,7 @@ class TestConversationServiceSummarization: assert conversation.name == original_name # Name remains unchanged @patch("services.conversation_service.naive_utc_now") - def test_rename_with_manual_name(self, mock_naive_utc_now, db_session_with_containers): + def test_rename_with_manual_name(self, mock_naive_utc_now, db_session_with_containers: Session): """ Test renaming conversation with manual name. 
@@ -638,7 +644,7 @@ class TestConversationServiceSummarization: assert conversation.updated_at == mock_time @patch("services.conversation_service.LLMGenerator.generate_conversation_name") - def test_rename_with_auto_generate(self, mock_llm_generator, db_session_with_containers): + def test_rename_with_auto_generate(self, mock_llm_generator, db_session_with_containers: Session): """ Test rename delegates to auto_generate_name when auto_generate is True. @@ -682,7 +688,9 @@ class TestConversationServiceMessageAnnotation: @patch("services.annotation_service.add_annotation_to_index_task") @patch("services.annotation_service.current_account_with_tenant") - def test_create_annotation_from_message(self, mock_current_account, mock_add_task, db_session_with_containers): + def test_create_annotation_from_message( + self, mock_current_account, mock_add_task, db_session_with_containers: Session + ): """ Test creating annotation from existing message. @@ -721,7 +729,9 @@ class TestConversationServiceMessageAnnotation: @patch("services.annotation_service.add_annotation_to_index_task") @patch("services.annotation_service.current_account_with_tenant") - def test_create_annotation_without_message(self, mock_current_account, mock_add_task, db_session_with_containers): + def test_create_annotation_without_message( + self, mock_current_account, mock_add_task, db_session_with_containers: Session + ): """ Test creating standalone annotation without message. @@ -753,7 +763,7 @@ class TestConversationServiceMessageAnnotation: @patch("services.annotation_service.add_annotation_to_index_task") @patch("services.annotation_service.current_account_with_tenant") - def test_update_existing_annotation(self, mock_current_account, mock_add_task, db_session_with_containers): + def test_update_existing_annotation(self, mock_current_account, mock_add_task, db_session_with_containers: Session): """ Test updating an existing annotation. 
@@ -800,7 +810,7 @@ class TestConversationServiceMessageAnnotation: mock_add_task.delay.assert_not_called() @patch("services.annotation_service.current_account_with_tenant") - def test_get_annotation_list(self, mock_current_account, db_session_with_containers): + def test_get_annotation_list(self, mock_current_account, db_session_with_containers: Session): """ Test retrieving paginated annotation list. @@ -836,7 +846,7 @@ class TestConversationServiceMessageAnnotation: assert result_total == 5 @patch("services.annotation_service.current_account_with_tenant") - def test_get_annotation_list_with_keyword_search(self, mock_current_account, db_session_with_containers): + def test_get_annotation_list_with_keyword_search(self, mock_current_account, db_session_with_containers: Session): """ Test retrieving annotations with keyword filtering. @@ -885,7 +895,7 @@ class TestConversationServiceMessageAnnotation: @patch("services.annotation_service.add_annotation_to_index_task") @patch("services.annotation_service.current_account_with_tenant") - def test_insert_annotation_directly(self, mock_current_account, mock_add_task, db_session_with_containers): + def test_insert_annotation_directly(self, mock_current_account, mock_add_task, db_session_with_containers: Session): """ Test direct annotation insertion without message reference. @@ -919,7 +929,7 @@ class TestConversationServiceExport: Tests retrieving conversation data for export purposes. 
""" - def test_get_conversation_success(self, db_session_with_containers): + def test_get_conversation_success(self, db_session_with_containers: Session): """Test successful retrieval of conversation.""" # Arrange app_model, user = ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -937,7 +947,7 @@ class TestConversationServiceExport: # Assert assert result == conversation - def test_get_conversation_not_found(self, db_session_with_containers): + def test_get_conversation_not_found(self, db_session_with_containers: Session): """Test ConversationNotExistsError when conversation doesn't exist.""" # Arrange app_model, user = ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -949,7 +959,7 @@ class TestConversationServiceExport: ConversationService.get_conversation(app_model=app_model, conversation_id=str(uuid4()), user=user) @patch("services.annotation_service.current_account_with_tenant") - def test_export_annotation_list(self, mock_current_account, db_session_with_containers): + def test_export_annotation_list(self, mock_current_account, db_session_with_containers: Session): """Test exporting all annotations for an app.""" # Arrange app_model, account = ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -977,7 +987,7 @@ class TestConversationServiceExport: # Assert assert len(result) == 10 - def test_get_message_success(self, db_session_with_containers): + def test_get_message_success(self, db_session_with_containers: Session): """Test successful retrieval of a message.""" # Arrange app_model, user = ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -1001,7 +1011,7 @@ class TestConversationServiceExport: # Assert assert result == message - def test_get_message_not_found(self, db_session_with_containers): + def test_get_message_not_found(self, db_session_with_containers: Session): """Test MessageNotExistsError when message doesn't exist.""" # Arrange app_model, user = 
ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -1012,7 +1022,7 @@ class TestConversationServiceExport: with pytest.raises(MessageNotExistsError): MessageService.get_message(app_model=app_model, user=user, message_id=str(uuid4())) - def test_get_conversation_for_end_user(self, db_session_with_containers): + def test_get_conversation_for_end_user(self, db_session_with_containers: Session): """ Test retrieving conversation created by end user via API. @@ -1038,7 +1048,7 @@ class TestConversationServiceExport: assert result == conversation @patch("services.conversation_service.delete_conversation_related_data") - def test_delete_conversation(self, mock_delete_task, db_session_with_containers): + def test_delete_conversation(self, mock_delete_task, db_session_with_containers: Session): """ Test conversation deletion with async cleanup. @@ -1071,7 +1081,7 @@ class TestConversationServiceExport: mock_delete_task.delay.assert_called_once_with(conversation_id) @patch("services.conversation_service.delete_conversation_related_data") - def test_delete_conversation_not_owned_by_account(self, mock_delete_task, db_session_with_containers): + def test_delete_conversation_not_owned_by_account(self, mock_delete_task, db_session_with_containers: Session): """ Test deletion is denied when conversation belongs to a different account. """ @@ -1102,7 +1112,7 @@ class TestConversationServiceExport: mock_delete_task.delay.assert_not_called() @patch("services.conversation_service.delete_conversation_related_data") - def test_delete_handles_exception_and_rollback(self, mock_delete_task, db_session_with_containers): + def test_delete_handles_exception_and_rollback(self, mock_delete_task, db_session_with_containers: Session): """ Test that delete propagates exceptions and does not trigger the cleanup task. 
diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py index 0b7bd9ca64..853630ad65 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py @@ -5,7 +5,8 @@ from unittest.mock import patch from uuid import uuid4 import pytest -from sqlalchemy.orm import sessionmaker +from flask import Flask +from sqlalchemy.orm import Session, sessionmaker from core.app.entities.app_invoke_entities import InvokeFrom from extensions.ext_database import db @@ -24,7 +25,7 @@ from services.errors.conversation import ( class ConversationServiceVariableIntegrationFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -149,7 +150,7 @@ class ConversationServiceVariableIntegrationFactory: @pytest.fixture -def real_conversation_service_session_factory(flask_app_with_containers): +def real_conversation_service_session_factory(flask_app_with_containers: Flask): del flask_app_with_containers real_session_maker = sessionmaker(bind=db.engine, expire_on_commit=False) @@ -162,7 +163,7 @@ def real_conversation_service_session_factory(flask_app_with_containers): class TestConversationServiceVariables: def test_get_conversational_variable_success( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -200,7 +201,7 @@ class TestConversationServiceVariables: assert result.has_more is False def 
test_get_conversational_variable_with_last_id( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -242,7 +243,7 @@ class TestConversationServiceVariables: assert result.has_more is False def test_get_conversational_variable_last_id_not_found_raises_error( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -259,7 +260,7 @@ class TestConversationServiceVariables: ) def test_get_conversational_variable_sets_has_more( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -287,7 +288,7 @@ class TestConversationServiceVariables: assert result.has_more is True def test_update_conversation_variable_success( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -320,7 +321,7 @@ class TestConversationServiceVariables: assert result["updated_at"] == updated_at def test_update_conversation_variable_not_found_raises_error( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -337,7 +338,7 @@ class 
TestConversationServiceVariables: ) def test_update_conversation_variable_type_mismatch_raises_error( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -360,7 +361,7 @@ class TestConversationServiceVariables: ) def test_update_conversation_variable_integer_number_compatibility( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -390,7 +391,7 @@ class TestConversationServiceVariables: class TestConversationServicePaginationWithContainers: - def test_pagination_by_last_id_raises_error_when_last_id_missing(self, db_session_with_containers): + def test_pagination_by_last_id_raises_error_when_last_id_missing(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) @@ -404,7 +405,7 @@ class TestConversationServicePaginationWithContainers: invoke_from=InvokeFrom.WEB_APP, ) - def test_pagination_by_last_id_with_default_desc_updated_at(self, db_session_with_containers): + def test_pagination_by_last_id_with_default_desc_updated_at(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) base_time = datetime(2024, 1, 1, 8, 0, 0) @@ -442,7 +443,7 @@ class TestConversationServicePaginationWithContainers: assert newest.id != middle.id assert [conversation.id for conversation in result.data] == [oldest.id] - def test_pagination_by_last_id_with_name_sort(self, db_session_with_containers): + def 
test_pagination_by_last_id_with_name_sort(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) alpha = factory.create_conversation(db_session_with_containers, app, account, name="Alpha") @@ -462,7 +463,7 @@ class TestConversationServicePaginationWithContainers: assert alpha.id != beta.id assert [conversation.id for conversation in result.data] == [gamma.id] - def test_pagination_filters_to_end_user_api_source(self, db_session_with_containers): + def test_pagination_filters_to_end_user_api_source(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) end_user = factory.create_end_user(db_session_with_containers, app) @@ -493,7 +494,7 @@ class TestConversationServicePaginationWithContainers: assert account_conversation.id != end_user_conversation.id assert [conversation.id for conversation in result.data] == [end_user_conversation.id] - def test_pagination_filters_to_account_console_source(self, db_session_with_containers): + def test_pagination_filters_to_account_console_source(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) end_user = factory.create_end_user(db_session_with_containers, app) diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_variable_updater.py b/api/tests/test_containers_integration_tests/services/test_conversation_variable_updater.py index 02ab3f8314..638a962f18 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_variable_updater.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_variable_updater.py @@ -3,7 +3,7 @@ from uuid import uuid4 import pytest -from sqlalchemy.orm import sessionmaker 
+from sqlalchemy.orm import Session, sessionmaker from extensions.ext_database import db from graphon.variables import StringVariable @@ -13,7 +13,12 @@ from services.conversation_variable_updater import ConversationVariableNotFoundE class TestConversationVariableUpdater: def _create_conversation_variable( - self, db_session_with_containers, *, conversation_id: str, variable: StringVariable, app_id: str | None = None + self, + db_session_with_containers: Session, + *, + conversation_id: str, + variable: StringVariable, + app_id: str | None = None, ) -> ConversationVariable: row = ConversationVariable( id=variable.id, @@ -25,7 +30,7 @@ class TestConversationVariableUpdater: db_session_with_containers.commit() return row - def test_should_update_conversation_variable_data_and_commit(self, db_session_with_containers): + def test_should_update_conversation_variable_data_and_commit(self, db_session_with_containers: Session): conversation_id = str(uuid4()) variable = StringVariable(id=str(uuid4()), name="topic", value="old value") self._create_conversation_variable( @@ -42,7 +47,7 @@ class TestConversationVariableUpdater: assert row is not None assert row.data == updated_variable.model_dump_json() - def test_should_raise_not_found_when_variable_missing(self, db_session_with_containers): + def test_should_raise_not_found_when_variable_missing(self, db_session_with_containers: Session): conversation_id = str(uuid4()) variable = StringVariable(id=str(uuid4()), name="topic", value="value") updater = ConversationVariableUpdater(sessionmaker(bind=db.engine)) @@ -50,7 +55,7 @@ class TestConversationVariableUpdater: with pytest.raises(ConversationVariableNotFoundError, match="conversation variable not found in the database"): updater.update(conversation_id=conversation_id, variable=variable) - def test_should_do_nothing_when_flush_is_called(self, db_session_with_containers): + def test_should_do_nothing_when_flush_is_called(self, db_session_with_containers: Session): updater = 
ConversationVariableUpdater(sessionmaker(bind=db.engine)) result = updater.flush() diff --git a/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py b/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py index 0f63d98642..09ba041244 100644 --- a/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py +++ b/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py @@ -3,6 +3,7 @@ from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from core.errors.error import QuotaExceededError from models import TenantCreditPool @@ -14,7 +15,7 @@ class TestCreditPoolService: def _create_tenant_id(self) -> str: return str(uuid4()) - def test_create_default_pool(self, db_session_with_containers): + def test_create_default_pool(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) @@ -25,7 +26,7 @@ class TestCreditPoolService: assert pool.quota_used == 0 assert pool.quota_limit > 0 - def test_get_pool_returns_pool_when_exists(self, db_session_with_containers): + def test_get_pool_returns_pool_when_exists(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() CreditPoolService.create_default_pool(tenant_id) @@ -35,17 +36,17 @@ class TestCreditPoolService: assert result.tenant_id == tenant_id assert result.pool_type == ProviderQuotaType.TRIAL - def test_get_pool_returns_none_when_not_exists(self, db_session_with_containers): + def test_get_pool_returns_none_when_not_exists(self, db_session_with_containers: Session): result = CreditPoolService.get_pool(tenant_id=self._create_tenant_id(), pool_type=ProviderQuotaType.TRIAL) assert result is None - def test_check_credits_available_returns_false_when_no_pool(self, db_session_with_containers): + def test_check_credits_available_returns_false_when_no_pool(self, db_session_with_containers: Session): result = 
CreditPoolService.check_credits_available(tenant_id=self._create_tenant_id(), credits_required=10) assert result is False - def test_check_credits_available_returns_true_when_sufficient(self, db_session_with_containers): + def test_check_credits_available_returns_true_when_sufficient(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() CreditPoolService.create_default_pool(tenant_id) @@ -53,7 +54,7 @@ class TestCreditPoolService: assert result is True - def test_check_credits_available_returns_false_when_insufficient(self, db_session_with_containers): + def test_check_credits_available_returns_false_when_insufficient(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) # Exhaust credits @@ -64,11 +65,11 @@ class TestCreditPoolService: assert result is False - def test_check_and_deduct_credits_raises_when_no_pool(self, db_session_with_containers): + def test_check_and_deduct_credits_raises_when_no_pool(self, db_session_with_containers: Session): with pytest.raises(QuotaExceededError, match="Credit pool not found"): CreditPoolService.check_and_deduct_credits(tenant_id=self._create_tenant_id(), credits_required=10) - def test_check_and_deduct_credits_raises_when_no_remaining(self, db_session_with_containers): + def test_check_and_deduct_credits_raises_when_no_remaining(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) pool.quota_used = pool.quota_limit @@ -77,7 +78,7 @@ class TestCreditPoolService: with pytest.raises(QuotaExceededError, match="No credits remaining"): CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=10) - def test_check_and_deduct_credits_deducts_required_amount(self, db_session_with_containers): + def test_check_and_deduct_credits_deducts_required_amount(self, db_session_with_containers: Session): tenant_id = 
self._create_tenant_id() CreditPoolService.create_default_pool(tenant_id) credits_required = 10 @@ -89,7 +90,7 @@ class TestCreditPoolService: pool = CreditPoolService.get_pool(tenant_id=tenant_id) assert pool.quota_used == credits_required - def test_check_and_deduct_credits_caps_at_remaining(self, db_session_with_containers): + def test_check_and_deduct_credits_caps_at_remaining(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) remaining = 5 diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_permission_service.py b/api/tests/test_containers_integration_tests/services/test_dataset_permission_service.py index 71c8874f79..f9898e2cfa 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_permission_service.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_permission_service.py @@ -8,6 +8,7 @@ checks with testcontainers-backed infrastructure instead of database-chain mocks from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexTechniqueType from extensions.ext_database import db @@ -107,7 +108,7 @@ class DatasetPermissionTestDataFactory: class TestDatasetPermissionServiceGetPartialMemberList: """Verify partial-member list reads against persisted DatasetPermission rows.""" - def test_get_dataset_partial_member_list_with_members(self, db_session_with_containers): + def test_get_dataset_partial_member_list_with_members(self, db_session_with_containers: Session): """ Test retrieving partial member list with multiple members. 
""" @@ -138,7 +139,7 @@ class TestDatasetPermissionServiceGetPartialMemberList: assert set(result) == set(expected_account_ids) assert len(result) == 3 - def test_get_dataset_partial_member_list_with_single_member(self, db_session_with_containers): + def test_get_dataset_partial_member_list_with_single_member(self, db_session_with_containers: Session): """ Test retrieving partial member list with single member. """ @@ -160,7 +161,7 @@ class TestDatasetPermissionServiceGetPartialMemberList: assert set(result) == set(expected_account_ids) assert len(result) == 1 - def test_get_dataset_partial_member_list_empty(self, db_session_with_containers): + def test_get_dataset_partial_member_list_empty(self, db_session_with_containers: Session): """ Test retrieving partial member list when no members exist. """ @@ -179,7 +180,7 @@ class TestDatasetPermissionServiceGetPartialMemberList: class TestDatasetPermissionServiceUpdatePartialMemberList: """Verify partial-member list updates against persisted DatasetPermission rows.""" - def test_update_partial_member_list_add_new_members(self, db_session_with_containers): + def test_update_partial_member_list_add_new_members(self, db_session_with_containers: Session): """ Test adding new partial members to a dataset. """ @@ -203,7 +204,7 @@ class TestDatasetPermissionServiceUpdatePartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert set(result) == {member_1.id, member_2.id} - def test_update_partial_member_list_replace_existing(self, db_session_with_containers): + def test_update_partial_member_list_replace_existing(self, db_session_with_containers: Session): """ Test replacing existing partial members with new ones. 
""" @@ -239,7 +240,7 @@ class TestDatasetPermissionServiceUpdatePartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert set(result) == {new_member_1.id, new_member_2.id} - def test_update_partial_member_list_empty_list(self, db_session_with_containers): + def test_update_partial_member_list_empty_list(self, db_session_with_containers: Session): """ Test updating with empty member list (clearing all members). """ @@ -264,7 +265,7 @@ class TestDatasetPermissionServiceUpdatePartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert result == [] - def test_update_partial_member_list_database_error_rollback(self, db_session_with_containers): + def test_update_partial_member_list_database_error_rollback(self, db_session_with_containers: Session): """ Test error handling and rollback on database error. """ @@ -313,7 +314,7 @@ class TestDatasetPermissionServiceUpdatePartialMemberList: class TestDatasetPermissionServiceClearPartialMemberList: """Verify partial-member clearing against persisted DatasetPermission rows.""" - def test_clear_partial_member_list_success(self, db_session_with_containers): + def test_clear_partial_member_list_success(self, db_session_with_containers: Session): """ Test successful clearing of partial member list. """ @@ -338,7 +339,7 @@ class TestDatasetPermissionServiceClearPartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert result == [] - def test_clear_partial_member_list_empty_list(self, db_session_with_containers): + def test_clear_partial_member_list_empty_list(self, db_session_with_containers: Session): """ Test clearing partial member list when no members exist. 
""" @@ -353,7 +354,7 @@ class TestDatasetPermissionServiceClearPartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert result == [] - def test_clear_partial_member_list_database_error_rollback(self, db_session_with_containers): + def test_clear_partial_member_list_database_error_rollback(self, db_session_with_containers: Session): """ Test error handling and rollback on database error. """ @@ -398,7 +399,7 @@ class TestDatasetPermissionServiceClearPartialMemberList: class TestDatasetServiceCheckDatasetPermission: """Verify dataset access checks against persisted partial-member permissions.""" - def test_check_dataset_permission_different_tenant_should_fail(self, db_session_with_containers): + def test_check_dataset_permission_different_tenant_should_fail(self, db_session_with_containers: Session): """Test that users from different tenants cannot access dataset.""" owner, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.OWNER) other_user, _ = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.EDITOR) @@ -410,7 +411,7 @@ class TestDatasetServiceCheckDatasetPermission: with pytest.raises(NoPermissionError): DatasetService.check_dataset_permission(dataset, other_user) - def test_check_dataset_permission_owner_can_access_any_dataset(self, db_session_with_containers): + def test_check_dataset_permission_owner_can_access_any_dataset(self, db_session_with_containers: Session): """Test that tenant owners can access any dataset regardless of permission level.""" owner, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.OWNER) creator, _ = DatasetPermissionTestDataFactory.create_account_with_tenant( @@ -423,7 +424,7 @@ class TestDatasetServiceCheckDatasetPermission: DatasetService.check_dataset_permission(dataset, owner) - def test_check_dataset_permission_only_me_creator_can_access(self, 
db_session_with_containers): + def test_check_dataset_permission_only_me_creator_can_access(self, db_session_with_containers: Session): """Test ONLY_ME permission allows only the dataset creator to access.""" creator, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.EDITOR) @@ -433,7 +434,7 @@ class TestDatasetServiceCheckDatasetPermission: DatasetService.check_dataset_permission(dataset, creator) - def test_check_dataset_permission_only_me_others_cannot_access(self, db_session_with_containers): + def test_check_dataset_permission_only_me_others_cannot_access(self, db_session_with_containers: Session): """Test ONLY_ME permission denies access to non-creators.""" creator, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.NORMAL) other, _ = DatasetPermissionTestDataFactory.create_account_with_tenant( @@ -447,7 +448,7 @@ class TestDatasetServiceCheckDatasetPermission: with pytest.raises(NoPermissionError): DatasetService.check_dataset_permission(dataset, other) - def test_check_dataset_permission_all_team_allows_access(self, db_session_with_containers): + def test_check_dataset_permission_all_team_allows_access(self, db_session_with_containers: Session): """Test ALL_TEAM permission allows any team member to access the dataset.""" creator, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.NORMAL) member, _ = DatasetPermissionTestDataFactory.create_account_with_tenant( @@ -460,7 +461,9 @@ class TestDatasetServiceCheckDatasetPermission: DatasetService.check_dataset_permission(dataset, member) - def test_check_dataset_permission_partial_members_with_permission_success(self, db_session_with_containers): + def test_check_dataset_permission_partial_members_with_permission_success( + self, db_session_with_containers: Session + ): """ Test that user with explicit permission can access partial_members dataset. 
""" @@ -485,7 +488,9 @@ class TestDatasetServiceCheckDatasetPermission: permissions = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert user.id in permissions - def test_check_dataset_permission_partial_members_without_permission_error(self, db_session_with_containers): + def test_check_dataset_permission_partial_members_without_permission_error( + self, db_session_with_containers: Session + ): """ Test error when user without permission tries to access partial_members dataset. """ @@ -506,7 +511,7 @@ class TestDatasetServiceCheckDatasetPermission: with pytest.raises(NoPermissionError, match="You do not have permission to access this dataset"): DatasetService.check_dataset_permission(dataset, user) - def test_check_dataset_permission_partial_team_creator_can_access(self, db_session_with_containers): + def test_check_dataset_permission_partial_team_creator_can_access(self, db_session_with_containers: Session): """Test PARTIAL_TEAM permission allows creator to access without explicit permission.""" creator, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.EDITOR) diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service.py b/api/tests/test_containers_integration_tests/services/test_dataset_service.py index 0de3c64c4f..e6ee896a52 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service.py @@ -712,7 +712,7 @@ class TestDatasetServiceRetrievalConfiguration: class TestDocumentServicePauseRecoverRetry: """Tests for pause/recover/retry orchestration using real DB and Redis.""" - def _create_indexing_document(self, db_session_with_containers, indexing_status="indexing"): + def _create_indexing_document(self, db_session_with_containers: Session, indexing_status="indexing"): factory = DatasetServiceIntegrationDataFactory account, tenant = 
factory.create_account_with_tenant(db_session_with_containers) dataset = factory.create_dataset(db_session_with_containers, tenant.id, account.id) @@ -721,7 +721,7 @@ class TestDocumentServicePauseRecoverRetry: db_session_with_containers.commit() return doc, account - def test_pause_document_success(self, db_session_with_containers): + def test_pause_document_success(self, db_session_with_containers: Session): from extensions.ext_redis import redis_client from services.dataset_service import DocumentService @@ -740,7 +740,7 @@ class TestDocumentServicePauseRecoverRetry: assert redis_client.get(cache_key) is not None redis_client.delete(cache_key) - def test_pause_document_invalid_status_error(self, db_session_with_containers): + def test_pause_document_invalid_status_error(self, db_session_with_containers: Session): from services.dataset_service import DocumentService from services.errors.document import DocumentIndexingError @@ -751,7 +751,7 @@ class TestDocumentServicePauseRecoverRetry: with pytest.raises(DocumentIndexingError): DocumentService.pause_document(doc) - def test_recover_document_success(self, db_session_with_containers): + def test_recover_document_success(self, db_session_with_containers: Session): from extensions.ext_redis import redis_client from services.dataset_service import DocumentService @@ -775,7 +775,7 @@ class TestDocumentServicePauseRecoverRetry: assert redis_client.get(cache_key) is None recover_task.delay.assert_called_once_with(doc.dataset_id, doc.id) - def test_retry_document_indexing_success(self, db_session_with_containers): + def test_retry_document_indexing_success(self, db_session_with_containers: Session): from extensions.ext_redis import redis_client from services.dataset_service import DocumentService diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_create_dataset.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_create_dataset.py index c486ff5613..08de79f4b7 
100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_create_dataset.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_create_dataset.py @@ -6,6 +6,7 @@ from unittest.mock import Mock, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from models.account import Account, Tenant, TenantAccountJoin from services.dataset_service import DatasetService @@ -48,7 +49,7 @@ class TestDatasetServiceCreateRagPipelineDataset: permission="only_me", ) - def test_create_rag_pipeline_dataset_raises_when_current_user_id_is_none(self, db_session_with_containers): + def test_create_rag_pipeline_dataset_raises_when_current_user_id_is_none(self, db_session_with_containers: Session): tenant, _ = self._create_tenant_and_account(db_session_with_containers) mock_user = Mock(id=None) diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_delete_dataset.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_delete_dataset.py index 3cac964d89..c43a5d5978 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_delete_dataset.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_delete_dataset.py @@ -3,6 +3,8 @@ from unittest.mock import patch from uuid import uuid4 +from sqlalchemy.orm import Session + from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from models.dataset import Dataset, Document @@ -101,7 +103,7 @@ class DatasetDeleteIntegrationDataFactory: class TestDatasetServiceDeleteDataset: """Integration coverage for DatasetService.delete_dataset using testcontainers.""" - def test_delete_dataset_with_documents_success(self, db_session_with_containers): + def test_delete_dataset_with_documents_success(self, db_session_with_containers: Session): """Delete a 
dataset with documents and dispatch cleanup through the real signal handler.""" # Arrange owner, tenant = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) @@ -144,7 +146,7 @@ class TestDatasetServiceDeleteDataset: dataset.pipeline_id, ) - def test_delete_empty_dataset_success(self, db_session_with_containers): + def test_delete_empty_dataset_success(self, db_session_with_containers: Session): """Delete an empty dataset without scheduling cleanup when both gating fields are absent.""" # Arrange owner, tenant = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) @@ -172,7 +174,7 @@ class TestDatasetServiceDeleteDataset: assert db_session_with_containers.get(Dataset, dataset.id) is None clean_dataset_delay.assert_not_called() - def test_delete_dataset_with_partial_none_values(self, db_session_with_containers): + def test_delete_dataset_with_partial_none_values(self, db_session_with_containers: Session): """Delete a dataset without cleanup when indexing_technique is missing but doc_form resolves.""" # Arrange owner, tenant = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) @@ -200,7 +202,7 @@ class TestDatasetServiceDeleteDataset: assert db_session_with_containers.get(Dataset, dataset.id) is None clean_dataset_delay.assert_not_called() - def test_delete_dataset_with_doc_form_none_indexing_technique_exists(self, db_session_with_containers): + def test_delete_dataset_with_doc_form_none_indexing_technique_exists(self, db_session_with_containers: Session): """Delete a dataset without cleanup when indexing exists but doc_form resolves to None.""" # Arrange owner, tenant = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) @@ -228,7 +230,7 @@ class TestDatasetServiceDeleteDataset: assert db_session_with_containers.get(Dataset, dataset.id) is None clean_dataset_delay.assert_not_called() - def 
test_delete_dataset_not_found(self, db_session_with_containers): + def test_delete_dataset_not_found(self, db_session_with_containers: Session): """Return False without scheduling cleanup when the target dataset does not exist.""" # Arrange owner, _ = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py index 2bec703f0c..0c089e506b 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py @@ -6,6 +6,7 @@ from unittest.mock import create_autospec, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from werkzeug.exceptions import Forbidden, NotFound from core.rag.index_processor.constant.index_type import IndexStructureType @@ -119,13 +120,13 @@ def current_user_mock(): yield current_user -def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers): +def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_document(dataset.id, None) is None -def test_get_document_queries_by_dataset_and_document_id(db_session_with_containers): +def test_get_document_queries_by_dataset_and_document_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset) @@ -135,7 +136,7 @@ def test_get_document_queries_by_dataset_and_document_id(db_session_with_contain assert result.id == document.id -def 
test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers): +def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) result = DocumentService.get_documents_by_ids(dataset.id, []) @@ -143,7 +144,7 @@ def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_cont assert result == [] -def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers): +def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) doc_a = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, name="a.txt") doc_b = DocumentServiceIntegrationFactory.create_document( @@ -158,13 +159,13 @@ def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers assert {document.id for document in result} == {doc_a.id, doc_b.id} -def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers): +def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.update_documents_need_summary(dataset.id, []) == 0 -def test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers): +def test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) paragraph_doc = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -195,7 +196,7 @@ def test_update_documents_need_summary_updates_matching_non_qa_documents(db_sess assert refreshed_qa.need_summary is True -def 
test_get_document_download_url_uses_signed_url_helper(db_session_with_containers): +def test_get_document_download_url_uses_signed_url_helper(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -215,7 +216,7 @@ def test_get_document_download_url_uses_signed_url_helper(db_session_with_contai get_url.assert_called_once_with(upload_file_id=upload_file.id, as_attachment=True) -def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -232,7 +233,9 @@ def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type ) -def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -248,7 +251,7 @@ def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file ) -def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -265,7 +268,9 @@ def 
test_get_upload_file_id_for_upload_file_document_returns_string_id(db_sessio assert result == "99" -def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -278,7 +283,7 @@ def test_get_upload_file_for_upload_file_document_raises_when_file_service_retur DocumentService._get_upload_file_for_upload_file_document(document) -def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -296,7 +301,9 @@ def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session assert result.id == upload_file.id -def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with pytest.raises(NotFound, match="Document not found"): @@ -307,7 +314,9 @@ def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_doc ) -def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access( + db_session_with_containers: Session, +): dataset = 
DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -329,7 +338,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_a ) -def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -345,7 +356,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload ) -def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file_a = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -395,7 +408,7 @@ def test_prepare_document_batch_download_zip_raises_not_found_for_missing_datase def test_prepare_document_batch_download_zip_translates_permission_error_to_forbidden( - db_session_with_containers, + db_session_with_containers: Session, current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -418,7 +431,7 @@ def test_prepare_document_batch_download_zip_translates_permission_error_to_forb def test_prepare_document_batch_download_zip_returns_upload_files_in_requested_order( - db_session_with_containers, + db_session_with_containers: Session, current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -461,7 +474,7 @@ def 
test_prepare_document_batch_download_zip_returns_upload_files_in_requested_o assert download_name.endswith(".zip") -def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers): +def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) enabled_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -480,7 +493,9 @@ def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_co assert [document.id for document in result] == [enabled_document.id] -def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents(db_session_with_containers): +def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) available_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -501,7 +516,7 @@ def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchive assert [document.id for document in result] == [available_document.id] -def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers): +def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) error_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -526,7 +541,7 @@ def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db assert {document.id for document in result} == {error_document.id, paused_document.id} -def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers): +def 
test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) batch = f"batch-{uuid4()}" matching_document = DocumentServiceIntegrationFactory.create_document( @@ -549,7 +564,7 @@ def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_cont assert [document.id for document in result] == [matching_document.id] -def test_get_document_file_detail_returns_upload_file(db_session_with_containers): +def test_get_document_file_detail_returns_upload_file(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -563,7 +578,7 @@ def test_get_document_file_detail_returns_upload_file(db_session_with_containers assert result.id == upload_file.id -def test_delete_document_emits_signal_and_commits(db_session_with_containers): +def test_delete_document_emits_signal_and_commits(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -588,7 +603,7 @@ def test_delete_document_emits_signal_and_commits(db_session_with_containers): ) -def test_delete_documents_ignores_empty_input(db_session_with_containers): +def test_delete_documents_ignores_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with patch("services.dataset_service.batch_clean_document_task.delay") as delay: @@ -597,7 +612,7 @@ def test_delete_documents_ignores_empty_input(db_session_with_containers): delay.assert_not_called() -def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers): +def 
test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) dataset.chunk_structure = IndexStructureType.PARAGRAPH_INDEX db_session_with_containers.commit() @@ -637,14 +652,14 @@ def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_wi assert set(args[3]) == {upload_file_a.id, upload_file_b.id} -def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers): +def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, position=3) assert DocumentService.get_documents_position(dataset.id) == 4 -def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers): +def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_documents_position(dataset.id) == 1 diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_permissions.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_permissions.py index 1b4179c9c7..0603a1e27f 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_permissions.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_permissions.py @@ -6,6 +6,7 @@ from unittest.mock import patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from werkzeug.exceptions import NotFound @@ -363,7 +364,7 @@ class TestDatasetServicePermissionsAndLifecycle: 
DatasetService.check_dataset_operator_permission(user=operator, dataset=dataset) - def test_update_dataset_api_status_raises_not_found_for_missing_dataset(self, flask_app_with_containers): + def test_update_dataset_api_status_raises_not_found_for_missing_dataset(self, flask_app_with_containers: Flask): with flask_app_with_containers.app_context(): with pytest.raises(NotFound, match="Dataset not found"): DatasetService.update_dataset_api_status(str(uuid4()), True) @@ -473,7 +474,7 @@ class TestDatasetCollectionBindingServiceIntegration: assert persisted.type == "dataset" assert persisted.collection_name - def test_get_dataset_collection_binding_by_id_and_type_raises_when_missing(self, flask_app_with_containers): + def test_get_dataset_collection_binding_by_id_and_type_raises_when_missing(self, flask_app_with_containers: Flask): with flask_app_with_containers.app_context(): with pytest.raises(ValueError, match="Dataset collection binding not found"): DatasetCollectionBindingService.get_dataset_collection_binding_by_id_and_type(str(uuid4())) diff --git a/api/tests/test_containers_integration_tests/services/test_delete_archived_workflow_run.py b/api/tests/test_containers_integration_tests/services/test_delete_archived_workflow_run.py index fe426ae516..69c39b8bfb 100644 --- a/api/tests/test_containers_integration_tests/services/test_delete_archived_workflow_run.py +++ b/api/tests/test_containers_integration_tests/services/test_delete_archived_workflow_run.py @@ -6,6 +6,7 @@ from datetime import UTC, datetime, timedelta from uuid import uuid4 from sqlalchemy import select +from sqlalchemy.orm import Session from graphon.enums import WorkflowExecutionStatus from models.enums import CreatorUserRole, WorkflowRunTriggeredFrom @@ -46,7 +47,7 @@ class TestArchivedWorkflowRunDeletion: db_session_with_containers.commit() return run - def _create_archive_log(self, db_session_with_containers, *, run: WorkflowRun) -> None: + def _create_archive_log(self, db_session_with_containers: 
Session, *, run: WorkflowRun) -> None: archive_log = WorkflowArchiveLog( tenant_id=run.tenant_id, app_id=run.app_id, @@ -72,7 +73,7 @@ class TestArchivedWorkflowRunDeletion: db_session_with_containers.add(archive_log) db_session_with_containers.commit() - def test_delete_by_run_id_returns_error_when_run_missing(self, db_session_with_containers): + def test_delete_by_run_id_returns_error_when_run_missing(self, db_session_with_containers: Session): deleter = ArchivedWorkflowRunDeletion() missing_run_id = str(uuid4()) @@ -81,7 +82,7 @@ class TestArchivedWorkflowRunDeletion: assert result.success is False assert result.error == f"Workflow run {missing_run_id} not found" - def test_delete_by_run_id_returns_error_when_not_archived(self, db_session_with_containers): + def test_delete_by_run_id_returns_error_when_not_archived(self, db_session_with_containers: Session): tenant_id = str(uuid4()) run = self._create_workflow_run( db_session_with_containers, @@ -95,7 +96,7 @@ class TestArchivedWorkflowRunDeletion: assert result.success is False assert result.error == f"Workflow run {run.id} is not archived" - def test_delete_batch_uses_repo(self, db_session_with_containers): + def test_delete_batch_uses_repo(self, db_session_with_containers: Session): tenant_id = str(uuid4()) base_time = datetime.now(UTC) run1 = self._create_workflow_run(db_session_with_containers, tenant_id=tenant_id, created_at=base_time) @@ -124,7 +125,7 @@ class TestArchivedWorkflowRunDeletion: ).all() assert remaining_runs == [] - def test_delete_run_calls_repo(self, db_session_with_containers): + def test_delete_run_calls_repo(self, db_session_with_containers: Session): tenant_id = str(uuid4()) run = self._create_workflow_run( db_session_with_containers, @@ -142,7 +143,7 @@ class TestArchivedWorkflowRunDeletion: deleted_run = db_session_with_containers.get(WorkflowRun, run_id) assert deleted_run is None - def test_delete_run_dry_run(self, db_session_with_containers): + def test_delete_run_dry_run(self, 
db_session_with_containers: Session): """Dry run should return success without actually deleting.""" tenant_id = str(uuid4()) run = self._create_workflow_run( @@ -161,7 +162,7 @@ class TestArchivedWorkflowRunDeletion: db_session_with_containers.expire_all() assert db_session_with_containers.get(WorkflowRun, run_id) is not None - def test_delete_run_exception_returns_error(self, db_session_with_containers): + def test_delete_run_exception_returns_error(self, db_session_with_containers: Session): """Exception during deletion should return failure result.""" from unittest.mock import MagicMock, patch @@ -183,7 +184,7 @@ class TestArchivedWorkflowRunDeletion: assert result.success is False assert result.error == "Database error" - def test_delete_by_run_id_success(self, db_session_with_containers): + def test_delete_by_run_id_success(self, db_session_with_containers: Session): """Successfully delete an archived workflow run by ID.""" tenant_id = str(uuid4()) base_time = datetime.now(UTC) @@ -202,7 +203,7 @@ class TestArchivedWorkflowRunDeletion: db_session_with_containers.expunge_all() assert db_session_with_containers.get(WorkflowRun, run_id) is None - def test_get_workflow_run_repo_caches_instance(self, db_session_with_containers): + def test_get_workflow_run_repo_caches_instance(self, db_session_with_containers: Session): """_get_workflow_run_repo should return a cached repo on subsequent calls.""" deleter = ArchivedWorkflowRunDeletion() diff --git a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py index c0047df810..383a5f6374 100644 --- a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py +++ b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py @@ -2,6 +2,7 @@ import datetime from uuid import uuid4 from sqlalchemy import select +from 
sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType from models.dataset import Dataset, Document @@ -58,7 +59,7 @@ def _create_document( return document -def test_build_display_status_filters_available(db_session_with_containers): +def test_build_display_status_filters_available(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) available_doc = _create_document( db_session_with_containers, @@ -97,7 +98,7 @@ def test_build_display_status_filters_available(db_session_with_containers): assert [row.id for row in rows] == [available_doc.id] -def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers): +def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) waiting_doc = _create_document( db_session_with_containers, @@ -121,7 +122,7 @@ def test_apply_display_status_filter_applies_when_status_present(db_session_with assert [row.id for row in rows] == [waiting_doc.id] -def test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers): +def test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) doc1 = _create_document( db_session_with_containers, diff --git a/api/tests/test_containers_integration_tests/services/test_end_user_service.py b/api/tests/test_containers_integration_tests/services/test_end_user_service.py index cafabc939b..3f611d92f7 100644 --- a/api/tests/test_containers_integration_tests/services/test_end_user_service.py +++ b/api/tests/test_containers_integration_tests/services/test_end_user_service.py @@ -4,8 +4,10 @@ from unittest.mock import patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import 
TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.model import App, DefaultEndUserSessionID, EndUser from services.end_user_service import EndUserService @@ -15,7 +17,7 @@ class TestEndUserServiceFactory: """Factory class for creating test data and mock objects for end user service tests.""" @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -34,7 +36,7 @@ class TestEndUserServiceFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) @@ -102,7 +104,7 @@ class TestEndUserServiceGetOrCreateEndUser: """Provide test data factory.""" return TestEndUserServiceFactory() - def test_get_or_create_end_user_with_custom_user_id(self, db_session_with_containers, factory): + def test_get_or_create_end_user_with_custom_user_id(self, db_session_with_containers: Session, factory): """Test getting or creating end user with custom user_id.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -118,7 +120,7 @@ class TestEndUserServiceGetOrCreateEndUser: assert result.type == InvokeFrom.SERVICE_API assert result.is_anonymous is False - def test_get_or_create_end_user_without_user_id(self, db_session_with_containers, factory): + def test_get_or_create_end_user_without_user_id(self, db_session_with_containers: Session, factory): """Test getting or creating end user without user_id uses default session.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -131,7 +133,7 @@ class TestEndUserServiceGetOrCreateEndUser: # Verify _is_anonymous is set correctly (property always returns False) assert result._is_anonymous is True - def 
test_get_existing_end_user(self, db_session_with_containers, factory): + def test_get_existing_end_user(self, db_session_with_containers: Session, factory): """Test retrieving an existing end user.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -167,7 +169,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: """Provide test data factory.""" return TestEndUserServiceFactory() - def test_create_end_user_service_api_type(self, db_session_with_containers, factory): + def test_create_end_user_service_api_type(self, db_session_with_containers: Session, factory): """Test creating new end user with SERVICE_API type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -189,7 +191,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert result.app_id == app_id assert result.session_id == user_id - def test_create_end_user_web_app_type(self, db_session_with_containers, factory): + def test_create_end_user_web_app_type(self, db_session_with_containers: Session, factory): """Test creating new end user with WEB_APP type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -209,7 +211,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert result.type == InvokeFrom.WEB_APP @patch("services.end_user_service.logger") - def test_upgrade_legacy_end_user_type(self, mock_logger, db_session_with_containers, factory): + def test_upgrade_legacy_end_user_type(self, mock_logger, db_session_with_containers: Session, factory): """Test upgrading legacy end user with different type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -243,7 +245,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert "Upgrading legacy EndUser" in log_call @patch("services.end_user_service.logger") - def test_get_existing_end_user_matching_type(self, mock_logger, db_session_with_containers, factory): + def test_get_existing_end_user_matching_type(self, mock_logger, 
db_session_with_containers: Session, factory): """Test retrieving existing end user with matching type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -272,7 +274,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert result.type == InvokeFrom.SERVICE_API mock_logger.info.assert_not_called() - def test_create_anonymous_user_with_default_session(self, db_session_with_containers, factory): + def test_create_anonymous_user_with_default_session(self, db_session_with_containers: Session, factory): """Test creating anonymous user when user_id is None.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -293,7 +295,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert result._is_anonymous is True assert result.external_user_id == DefaultEndUserSessionID.DEFAULT_SESSION_ID - def test_query_ordering_prioritizes_matching_type(self, db_session_with_containers, factory): + def test_query_ordering_prioritizes_matching_type(self, db_session_with_containers: Session, factory): """Test that query ordering prioritizes records with matching type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -328,7 +330,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert result.id == matching.id assert result.id != non_matching.id - def test_external_user_id_matches_session_id(self, db_session_with_containers, factory): + def test_external_user_id_matches_session_id(self, db_session_with_containers: Session, factory): """Test that external_user_id is set to match session_id.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -357,7 +359,9 @@ class TestEndUserServiceGetOrCreateEndUserByType: InvokeFrom.DEBUGGER, ], ) - def test_create_end_user_with_different_invoke_types(self, db_session_with_containers, invoke_type, factory): + def test_create_end_user_with_different_invoke_types( + self, db_session_with_containers: Session, invoke_type, factory + 
): """Test creating end users with different InvokeFrom types.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -385,7 +389,7 @@ class TestEndUserServiceGetEndUserById: """Provide test data factory.""" return TestEndUserServiceFactory() - def test_get_end_user_by_id_returns_end_user(self, db_session_with_containers, factory): + def test_get_end_user_by_id_returns_end_user(self, db_session_with_containers: Session, factory): app = factory.create_app_and_account(db_session_with_containers) existing_user = factory.create_end_user( db_session_with_containers, @@ -404,7 +408,7 @@ class TestEndUserServiceGetEndUserById: assert result is not None assert result.id == existing_user.id - def test_get_end_user_by_id_returns_none(self, db_session_with_containers, factory): + def test_get_end_user_by_id_returns_none(self, db_session_with_containers: Session, factory): app = factory.create_app_and_account(db_session_with_containers) result = EndUserService.get_end_user_by_id( @@ -423,7 +427,7 @@ class TestEndUserServiceCreateBatch: def factory(self): return TestEndUserServiceFactory() - def _create_multiple_apps(self, db_session_with_containers, factory, count: int = 3): + def _create_multiple_apps(self, db_session_with_containers: Session, factory, count: int = 3): """Create multiple apps under the same tenant.""" first_app = factory.create_app_and_account(db_session_with_containers) tenant_id = first_app.tenant_id @@ -452,13 +456,13 @@ class TestEndUserServiceCreateBatch: all_apps = db_session_with_containers.query(App).filter(App.tenant_id == tenant_id).all() return tenant_id, all_apps - def test_create_batch_empty_app_ids(self, db_session_with_containers): + def test_create_batch_empty_app_ids(self, db_session_with_containers: Session): result = EndUserService.create_end_user_batch( type=InvokeFrom.SERVICE_API, tenant_id=str(uuid4()), app_ids=[], user_id="user-1" ) assert result == {} - def test_create_batch_creates_users_for_all_apps(self, 
db_session_with_containers, factory): + def test_create_batch_creates_users_for_all_apps(self, db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=3) app_ids = [a.id for a in apps] user_id = f"user-{uuid4()}" @@ -473,7 +477,7 @@ class TestEndUserServiceCreateBatch: assert result[app_id].session_id == user_id assert result[app_id].type == InvokeFrom.SERVICE_API - def test_create_batch_default_session_id(self, db_session_with_containers, factory): + def test_create_batch_default_session_id(self, db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=2) app_ids = [a.id for a in apps] @@ -486,7 +490,7 @@ class TestEndUserServiceCreateBatch: assert end_user.session_id == DefaultEndUserSessionID.DEFAULT_SESSION_ID assert end_user._is_anonymous is True - def test_create_batch_deduplicate_app_ids(self, db_session_with_containers, factory): + def test_create_batch_deduplicate_app_ids(self, db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=2) app_ids = [apps[0].id, apps[1].id, apps[0].id, apps[1].id] user_id = f"user-{uuid4()}" @@ -497,7 +501,7 @@ class TestEndUserServiceCreateBatch: assert len(result) == 2 - def test_create_batch_returns_existing_users(self, db_session_with_containers, factory): + def test_create_batch_returns_existing_users(self, db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=2) app_ids = [a.id for a in apps] user_id = f"user-{uuid4()}" @@ -516,7 +520,7 @@ class TestEndUserServiceCreateBatch: for app_id in app_ids: assert first_result[app_id].id == second_result[app_id].id - def test_create_batch_partial_existing_users(self, db_session_with_containers, factory): + def test_create_batch_partial_existing_users(self, 
db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=3) user_id = f"user-{uuid4()}" @@ -545,7 +549,7 @@ class TestEndUserServiceCreateBatch: "invoke_type", [InvokeFrom.SERVICE_API, InvokeFrom.WEB_APP, InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER], ) - def test_create_batch_all_invoke_types(self, db_session_with_containers, invoke_type, factory): + def test_create_batch_all_invoke_types(self, db_session_with_containers: Session, invoke_type, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=1) user_id = f"user-{uuid4()}" diff --git a/api/tests/test_containers_integration_tests/services/test_feature_service.py b/api/tests/test_containers_integration_tests/services/test_feature_service.py index 315936d721..a678e37b41 100644 --- a/api/tests/test_containers_integration_tests/services/test_feature_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feature_service.py @@ -2,6 +2,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from enums.cloud_plan import CloudPlan from services.feature_service import ( @@ -81,7 +82,7 @@ class TestFeatureService: fake = Faker() return fake.uuid4() - def test_get_features_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test successful feature retrieval with billing and enterprise enabled. @@ -156,7 +157,7 @@ class TestFeatureService: tenant_id ) - def test_get_features_sandbox_plan(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_sandbox_plan(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test feature retrieval for sandbox plan with specific limitations. 
@@ -222,7 +223,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) - def test_get_knowledge_rate_limit_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_knowledge_rate_limit_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful knowledge rate limit retrieval with billing enabled. @@ -255,7 +258,7 @@ class TestFeatureService: tenant_id ) - def test_get_system_features_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test successful system features retrieval with enterprise and marketplace enabled. @@ -332,7 +335,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() - def test_get_system_features_unauthenticated(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_unauthenticated( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test system features retrieval for an unauthenticated user. @@ -386,7 +391,9 @@ class TestFeatureService: # Marketplace should be visible assert result.enable_marketplace is True - def test_get_system_features_basic_config(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_basic_config( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test system features retrieval with basic configuration (no enterprise). 
@@ -436,7 +443,9 @@ class TestFeatureService: # Verify plugin package size (uses default value from dify_config) assert result.max_plugin_package_size == 15728640 - def test_get_features_billing_disabled(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_billing_disabled( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval when billing is disabled. @@ -492,7 +501,7 @@ class TestFeatureService: assert result.webapp_copyright_enabled is False def test_get_knowledge_rate_limit_billing_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test knowledge rate limit retrieval when billing is disabled. @@ -523,7 +532,9 @@ class TestFeatureService: # Verify no billing service calls mock_external_service_dependencies["billing_service"].get_knowledge_rate_limit.assert_not_called() - def test_get_features_enterprise_only(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_enterprise_only( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with enterprise enabled but billing disabled. @@ -583,7 +594,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_not_called() def test_get_system_features_enterprise_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval when enterprise is disabled. 
@@ -633,14 +644,14 @@ class TestFeatureService: assert result.max_plugin_package_size == 15728640 # Verify default license status - assert result.license.status.value == "none" + assert result.license.status == "none" assert result.license.expired_at == "" assert result.license.workspaces.enabled is False # Verify no enterprise service calls mock_external_service_dependencies["enterprise_service"].get_info.assert_not_called() - def test_get_features_no_tenant_id(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_no_tenant_id(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test feature retrieval without tenant ID (billing disabled). @@ -686,7 +697,9 @@ class TestFeatureService: # Verify no billing service calls mock_external_service_dependencies["billing_service"].get_info.assert_not_called() - def test_get_features_partial_billing_info(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_partial_billing_info( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with partial billing information. @@ -746,7 +759,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) - def test_get_features_edge_case_vector_space(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_edge_case_vector_space( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with edge case vector space configuration. 
@@ -807,7 +822,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_system_features_edge_case_webapp_auth( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with edge case webapp auth configuration. @@ -863,7 +878,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() - def test_get_features_edge_case_members_quota(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_edge_case_members_quota( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with edge case members quota configuration. @@ -924,7 +941,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_plugin_installation_permission_scopes( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with different plugin installation permission scopes. @@ -1023,7 +1040,7 @@ class TestFeatureService: assert result.plugin_installation_permission.restrict_to_marketplace_only is True def test_get_features_workspace_members_missing( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval when workspace members info is missing from enterprise. 
@@ -1064,7 +1081,9 @@ class TestFeatureService: tenant_id ) - def test_get_system_features_license_inactive(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_license_inactive( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test system features retrieval with inactive license. @@ -1117,7 +1136,7 @@ class TestFeatureService: mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() def test_get_system_features_partial_enterprise_info( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with partial enterprise information. @@ -1186,7 +1205,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() - def test_get_features_edge_case_limits(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_edge_case_limits( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with edge case limit values. @@ -1244,7 +1265,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_system_features_edge_case_protocols( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with edge case protocol values. 
@@ -1297,7 +1318,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() - def test_get_features_edge_case_education(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_edge_case_education( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with edge case education configuration. @@ -1353,7 +1376,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_license_limitation_model_is_available( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test LicenseLimitationModel.is_available method with various scenarios. @@ -1394,7 +1417,7 @@ class TestFeatureService: assert exact_limit.is_available(3) is True def test_get_features_workspace_members_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval when workspace members are disabled in enterprise. @@ -1433,7 +1456,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_workspace_info.assert_called_once_with(tenant_id) - def test_get_system_features_license_expired(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_license_expired( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test system features retrieval with expired license. 
@@ -1486,7 +1511,7 @@ class TestFeatureService: mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() def test_get_features_edge_case_docs_processing( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval with edge case document processing configuration. @@ -1544,7 +1569,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_system_features_edge_case_branding( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with edge case branding configuration. @@ -1606,7 +1631,7 @@ class TestFeatureService: mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() def test_get_features_edge_case_annotation_quota( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval with edge case annotation quota configuration. @@ -1668,7 +1693,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_features_edge_case_documents_upload( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval with edge case documents upload settings. 
@@ -1733,7 +1758,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_system_features_edge_case_license_lost( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features with lost license status. @@ -1784,7 +1809,7 @@ class TestFeatureService: mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() def test_get_features_edge_case_education_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval with education feature disabled. diff --git a/api/tests/test_containers_integration_tests/services/test_feedback_service.py b/api/tests/test_containers_integration_tests/services/test_feedback_service.py index 3dcd6586e2..a4663450d4 100644 --- a/api/tests/test_containers_integration_tests/services/test_feedback_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feedback_service.py @@ -23,7 +23,7 @@ class TestFeedbackService: """Test FeedbackService methods.""" @pytest.fixture - def mock_db_session(self, monkeypatch): + def mock_db_session(self, monkeypatch: pytest.MonkeyPatch): """Mock database session.""" mock_session = mock.Mock() monkeypatch.setattr(db, "session", mock_session) diff --git a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test.py b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test.py index 80f9083e81..a46698a6b1 100644 --- a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test.py +++ b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test.py @@ -1,8 +1,12 @@ import json import uuid +from io import BytesIO from unittest.mock import MagicMock 
import pytest +from flask.testing import FlaskClient +from sqlalchemy import select +from sqlalchemy.orm import Session from core.workflow.human_input_adapter import ( EmailDeliveryConfig, @@ -11,14 +15,21 @@ from core.workflow.human_input_adapter import ( ExternalRecipient, ) from graphon.enums import BuiltinNodeTypes -from graphon.nodes.human_input.entities import HumanInputNodeData +from graphon.nodes.human_input.entities import FileInputConfig, HumanInputNodeData +from graphon.nodes.human_input.enums import HumanInputFormKind from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole -from models.model import App, AppMode +from models.human_input import HumanInputForm, HumanInputFormRecipient, HumanInputFormUploadFile +from models.model import App, AppMode, UploadFile from models.workflow import Workflow, WorkflowType from services.workflow_service import WorkflowService -def _create_app_with_draft_workflow(session, *, delivery_method_id: uuid.UUID) -> tuple[App, Account]: +def _create_app_with_draft_workflow( + session: Session, + *, + delivery_method_id: uuid.UUID, + include_file_input: bool = False, +) -> tuple[App, Account]: tenant = Tenant(name="Test Tenant") account = Account(name="Tester", email="tester@example.com") session.add_all([tenant, account]) @@ -65,7 +76,7 @@ def _create_app_with_draft_workflow(session, *, delivery_method_id: uuid.UUID) - title="Human Input", delivery_methods=[email_method], form_content="Hello Human Input", - inputs=[], + inputs=[FileInputConfig(output_variable_name="attachment")] if include_file_input else [], user_actions=[], ).model_dump(mode="json") node_data["type"] = BuiltinNodeTypes.HUMAN_INPUT @@ -110,3 +121,71 @@ def test_human_input_delivery_test_sends_email( assert send_mock.call_count == 1 assert send_mock.call_args.kwargs["to"] == "recipient@example.com" + + +def test_human_input_delivery_test_form_accepts_file_upload( + db_session_with_containers: Session, + test_client_with_containers: 
FlaskClient, + monkeypatch: pytest.MonkeyPatch, +) -> None: + delivery_method_id = uuid.uuid4() + app, account = _create_app_with_draft_workflow( + db_session_with_containers, + delivery_method_id=delivery_method_id, + include_file_input=True, + ) + + monkeypatch.setattr("services.human_input_delivery_test_service.mail.is_inited", lambda: True) + monkeypatch.setattr("services.human_input_delivery_test_service.mail.send", MagicMock()) + + WorkflowService().test_human_input_delivery( + app_model=app, + account=account, + node_id="human-node", + delivery_method_id=str(delivery_method_id), + ) + + form = db_session_with_containers.scalar( + select(HumanInputForm) + .where( + HumanInputForm.app_id == app.id, + HumanInputForm.form_kind == HumanInputFormKind.DELIVERY_TEST, + HumanInputForm.workflow_run_id.is_(None), + ) + .limit(1) + ) + assert form is not None + recipient = db_session_with_containers.scalar( + select(HumanInputFormRecipient).where(HumanInputFormRecipient.form_id == form.id).limit(1) + ) + assert recipient is not None + assert recipient.access_token is not None + + token_response = test_client_with_containers.post(f"/api/form/human_input/{recipient.access_token}/upload-token") + assert token_response.status_code == 200 + upload_token = token_response.get_json()["upload_token"] + + upload_response = test_client_with_containers.post( + "/api/form/human_input/files/upload", + data={"file": (BytesIO(b"delivery test content"), "evidence.txt")}, + content_type="multipart/form-data", + headers={"Authorization": f"Bearer {upload_token}"}, + ) + + assert upload_response.status_code == 201, upload_response.get_data(as_text=True) + upload_file_id = upload_response.get_json()["id"] + + db_session_with_containers.expire_all() + upload_file = db_session_with_containers.get(UploadFile, upload_file_id) + assert upload_file is not None + assert upload_file.tenant_id == app.tenant_id + assert upload_file.created_by == account.id + link = db_session_with_containers.scalar( 
+ select(HumanInputFormUploadFile) + .where( + HumanInputFormUploadFile.form_id == form.id, + HumanInputFormUploadFile.upload_file_id == upload_file_id, + ) + .limit(1) + ) + assert link is not None diff --git a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py index ed75363f3b..bfc2af6509 100644 --- a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py +++ b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py @@ -6,6 +6,7 @@ from uuid import uuid4 import pytest from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session from configs import dify_config from core.workflow.human_input_adapter import ( @@ -88,7 +89,7 @@ class TestDeliveryTestRegistry: with pytest.raises(DeliveryTestUnsupportedError, match="Delivery method does not support test send."): registry.dispatch(context=context, method=method) - def test_default(self, flask_app_with_containers, db_session_with_containers): + def test_default(self, flask_app_with_containers, db_session_with_containers: Session): registry = DeliveryTestRegistry.default() assert len(registry._handlers) == 1 assert isinstance(registry._handlers[0], EmailDeliveryTestHandler) @@ -121,7 +122,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestUnsupportedError): handler.send_test(context=MagicMock(), method=MagicMock()) - def test_send_test_feature_disabled(self, monkeypatch): + def test_send_test_feature_disabled(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -136,7 +137,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Email delivery is not available"): handler.send_test(context=context, method=method) - def test_send_test_mail_not_inited(self, monkeypatch): + def 
test_send_test_mail_not_inited(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -153,7 +154,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Mail client is not initialized."): handler.send_test(context=context, method=method) - def test_send_test_no_recipients(self, monkeypatch): + def test_send_test_no_recipients(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -172,7 +173,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="No recipients configured"): handler.send_test(context=context, method=method) - def test_send_test_success(self, monkeypatch): + def test_send_test_success(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -208,7 +209,7 @@ class TestEmailDeliveryTestHandler: assert kwargs["to"] == "test@example.com" assert "RENDERED_Subj" in kwargs["subject"] - def test_send_test_sanitizes_subject(self, monkeypatch): + def test_send_test_sanitizes_subject(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -260,7 +261,7 @@ class TestEmailDeliveryTestHandler: ) assert handler._resolve_recipients(tenant_id="t1", method=method) == ["ext@example.com"] - def test_resolve_recipients_member(self, flask_app_with_containers, db_session_with_containers): + def test_resolve_recipients_member(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) account = Account(name="Test User", email="member@example.com") db_session_with_containers.add(account) @@ -282,7 +283,7 @@ class TestEmailDeliveryTestHandler: ) assert handler._resolve_recipients(tenant_id=tenant_id, method=method) == ["member@example.com"] - def test_resolve_recipients_whole_workspace(self, flask_app_with_containers, db_session_with_containers): + def 
test_resolve_recipients_whole_workspace(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) account1 = Account(name="User 1", email=f"u1-{uuid4()}@example.com") account2 = Account(name="User 2", email=f"u2-{uuid4()}@example.com") diff --git a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py index 44e5a82868..52ebc0131f 100644 --- a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py +++ b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py @@ -1,6 +1,7 @@ from __future__ import annotations import pytest +from sqlalchemy.orm import Session from services.message_service import MessageService from tests.test_containers_integration_tests.helpers.execution_extra_content import ( @@ -9,7 +10,7 @@ from tests.test_containers_integration_tests.helpers.execution_extra_content imp @pytest.mark.usefixtures("flask_req_ctx_with_containers") -def test_pagination_returns_extra_contents(db_session_with_containers): +def test_pagination_returns_extra_contents(db_session_with_containers: Session): fixture = create_human_input_message_fixture(db_session_with_containers) pagination = MessageService.pagination_by_first_id( diff --git a/api/tests/test_containers_integration_tests/services/test_messages_clean_service.py b/api/tests/test_containers_integration_tests/services/test_messages_clean_service.py index cd63d3ad6c..1a1efe0337 100644 --- a/api/tests/test_containers_integration_tests/services/test_messages_clean_service.py +++ b/api/tests/test_containers_integration_tests/services/test_messages_clean_service.py @@ -165,7 +165,7 @@ class TestMessagesCleanServiceIntegration: return app - def _create_conversation(self, db_session_with_containers: Session, app): + def _create_conversation(self, 
db_session_with_containers: Session, app: App): """Helper to create a conversation.""" conversation = Conversation( app_id=app.id, diff --git a/api/tests/test_containers_integration_tests/services/test_metadata_partial_update.py b/api/tests/test_containers_integration_tests/services/test_metadata_partial_update.py index b55a19eaa9..fffa82bf5c 100644 --- a/api/tests/test_containers_integration_tests/services/test_metadata_partial_update.py +++ b/api/tests/test_containers_integration_tests/services/test_metadata_partial_update.py @@ -5,6 +5,7 @@ from uuid import uuid4 import pytest from sqlalchemy import select +from sqlalchemy.orm import Session from models.dataset import Dataset, DatasetMetadataBinding, Document from models.enums import DataSourceType, DocumentCreatedFrom @@ -65,7 +66,7 @@ class TestMetadataPartialUpdate: yield account def test_partial_update_merges_metadata( - self, flask_app_with_containers, db_session_with_containers, tenant_id, mock_current_account + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, mock_current_account ): dataset = _create_dataset(db_session_with_containers, tenant_id=tenant_id) document = _create_document( @@ -92,7 +93,7 @@ class TestMetadataPartialUpdate: assert updated_doc.doc_metadata["new_key"] == "new_value" def test_full_update_replaces_metadata( - self, flask_app_with_containers, db_session_with_containers, tenant_id, mock_current_account + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, mock_current_account ): dataset = _create_dataset(db_session_with_containers, tenant_id=tenant_id) document = _create_document( @@ -119,7 +120,7 @@ class TestMetadataPartialUpdate: assert "existing_key" not in updated_doc.doc_metadata def test_partial_update_skips_existing_binding( - self, flask_app_with_containers, db_session_with_containers, tenant_id, user_id, mock_current_account + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, 
user_id, mock_current_account ): dataset = _create_dataset(db_session_with_containers, tenant_id=tenant_id) document = _create_document( @@ -159,7 +160,7 @@ class TestMetadataPartialUpdate: assert len(bindings) == 1 def test_rollback_called_on_commit_failure( - self, flask_app_with_containers, db_session_with_containers, tenant_id, mock_current_account + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, mock_current_account ): dataset = _create_dataset(db_session_with_containers, tenant_id=tenant_id) document = _create_document( diff --git a/api/tests/test_containers_integration_tests/services/test_oauth_server_service.py b/api/tests/test_containers_integration_tests/services/test_oauth_server_service.py index c146a5924b..5fa5de6d80 100644 --- a/api/tests/test_containers_integration_tests/services/test_oauth_server_service.py +++ b/api/tests/test_containers_integration_tests/services/test_oauth_server_service.py @@ -8,6 +8,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from werkzeug.exceptions import BadRequest from models.model import OAuthProviderApp @@ -25,7 +26,7 @@ from services.oauth_server import ( class TestOAuthServerServiceGetProviderApp: """DB-backed tests for get_oauth_provider_app.""" - def _create_oauth_provider_app(self, db_session_with_containers, *, client_id: str) -> OAuthProviderApp: + def _create_oauth_provider_app(self, db_session_with_containers: Session, *, client_id: str) -> OAuthProviderApp: app = OAuthProviderApp( app_icon="icon.png", client_id=client_id, @@ -38,7 +39,7 @@ class TestOAuthServerServiceGetProviderApp: db_session_with_containers.commit() return app - def test_get_oauth_provider_app_returns_app_when_exists(self, db_session_with_containers): + def test_get_oauth_provider_app_returns_app_when_exists(self, db_session_with_containers: Session): client_id = f"client-{uuid4()}" created = 
self._create_oauth_provider_app(db_session_with_containers, client_id=client_id) @@ -48,7 +49,7 @@ class TestOAuthServerServiceGetProviderApp: assert result.client_id == client_id assert result.id == created.id - def test_get_oauth_provider_app_returns_none_when_not_exists(self, db_session_with_containers): + def test_get_oauth_provider_app_returns_none_when_not_exists(self, db_session_with_containers: Session): result = OAuthServerService.get_oauth_provider_app(f"nonexistent-{uuid4()}") assert result is None diff --git a/api/tests/test_containers_integration_tests/services/test_restore_archived_workflow_run.py b/api/tests/test_containers_integration_tests/services/test_restore_archived_workflow_run.py index 7036524918..2f20949611 100644 --- a/api/tests/test_containers_integration_tests/services/test_restore_archived_workflow_run.py +++ b/api/tests/test_containers_integration_tests/services/test_restore_archived_workflow_run.py @@ -8,6 +8,7 @@ from datetime import datetime from uuid import uuid4 from sqlalchemy import select +from sqlalchemy.orm import Session from models.workflow import WorkflowPause, WorkflowRun from services.retention.workflow_run.restore_archived_workflow_run import WorkflowRunRestore @@ -39,7 +40,7 @@ class TestWorkflowRunRestore: assert result["created_at"].month == 1 assert result["name"] == "test" - def test_restore_table_records_returns_rowcount(self, db_session_with_containers): + def test_restore_table_records_returns_rowcount(self, db_session_with_containers: Session): """Restore should return inserted rowcount.""" restore = WorkflowRunRestore() record_id = str(uuid4()) @@ -65,7 +66,7 @@ class TestWorkflowRunRestore: restored_pause = db_session_with_containers.scalar(select(WorkflowPause).where(WorkflowPause.id == record_id)) assert restored_pause is not None - def test_restore_table_records_unknown_table(self, db_session_with_containers): + def test_restore_table_records_unknown_table(self, db_session_with_containers: Session): 
"""Unknown table names should be ignored gracefully.""" restore = WorkflowRunRestore() diff --git a/api/tests/test_containers_integration_tests/services/test_saved_message_service.py b/api/tests/test_containers_integration_tests/services/test_saved_message_service.py index 70aa813142..7b9e9924cd 100644 --- a/api/tests/test_containers_integration_tests/services/test_saved_message_service.py +++ b/api/tests/test_containers_integration_tests/services/test_saved_message_service.py @@ -4,6 +4,7 @@ import pytest from faker import Faker from sqlalchemy.orm import Session +from models import App, CreatorUserRole from models.enums import ConversationFromSource from models.model import EndUser, Message from models.web import SavedMessage @@ -88,7 +89,7 @@ class TestSavedMessageService: return app, account - def _create_test_end_user(self, db_session_with_containers: Session, app): + def _create_test_end_user(self, db_session_with_containers: Session, app: App): """ Helper method to create a test end user for testing. @@ -116,7 +117,7 @@ class TestSavedMessageService: return end_user - def _create_test_message(self, db_session_with_containers: Session, app, user): + def _create_test_message(self, db_session_with_containers: Session, app: App, user): """ Helper method to create a test message for testing. 
@@ -199,13 +200,13 @@ class TestSavedMessageService: saved_message1 = SavedMessage( app_id=app.id, message_id=message1.id, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, ) saved_message2 = SavedMessage( app_id=app.id, message_id=message2.id, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, ) @@ -272,13 +273,13 @@ class TestSavedMessageService: saved_message1 = SavedMessage( app_id=app.id, message_id=message1.id, - created_by_role="end_user", + created_by_role=CreatorUserRole.END_USER, created_by=end_user.id, ) saved_message2 = SavedMessage( app_id=app.id, message_id=message2.id, - created_by_role="end_user", + created_by_role=CreatorUserRole.END_USER, created_by=end_user.id, ) @@ -449,7 +450,7 @@ class TestSavedMessageService: saved_message = SavedMessage( app_id=app.id, message_id=message.id, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, ) @@ -540,7 +541,9 @@ class TestSavedMessageService: message = self._create_test_message(db_session_with_containers, app, account) # Pre-create a saved message - saved = SavedMessage(app_id=app.id, message_id=message.id, created_by_role="account", created_by=account.id) + saved = SavedMessage( + app_id=app.id, message_id=message.id, created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id + ) db_session_with_containers.add(saved) db_session_with_containers.commit() @@ -571,7 +574,9 @@ class TestSavedMessageService: end_user = self._create_test_end_user(db_session_with_containers, app) message = self._create_test_message(db_session_with_containers, app, end_user) - saved = SavedMessage(app_id=app.id, message_id=message.id, created_by_role="end_user", created_by=end_user.id) + saved = SavedMessage( + app_id=app.id, message_id=message.id, created_by_role=CreatorUserRole.END_USER, created_by=end_user.id + ) db_session_with_containers.add(saved) db_session_with_containers.commit() @@ 
-596,10 +601,10 @@ class TestSavedMessageService: # Both users save the same message saved_account = SavedMessage( - app_id=app.id, message_id=message.id, created_by_role="account", created_by=account1.id + app_id=app.id, message_id=message.id, created_by_role=CreatorUserRole.ACCOUNT, created_by=account1.id ) saved_end_user = SavedMessage( - app_id=app.id, message_id=message.id, created_by_role="end_user", created_by=end_user.id + app_id=app.id, message_id=message.id, created_by_role=CreatorUserRole.END_USER, created_by=end_user.id ) db_session_with_containers.add_all([saved_account, saved_end_user]) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/services/test_tag_service.py b/api/tests/test_containers_integration_tests/services/test_tag_service.py index 5a6bf0466e..583b6128e6 100644 --- a/api/tests/test_containers_integration_tests/services/test_tag_service.py +++ b/api/tests/test_containers_integration_tests/services/test_tag_service.py @@ -1099,38 +1099,39 @@ class TestTagService: db_session_with_containers, mock_external_service_dependencies ) - # Create tag - tag = self._create_test_tags( - db_session_with_containers, mock_external_service_dependencies, tenant.id, "knowledge", 1 - )[0] + # Create tags + tags = self._create_test_tags( + db_session_with_containers, mock_external_service_dependencies, tenant.id, "knowledge", 2 + ) - # Create dataset and bind tag + # Create dataset and bind tags dataset = self._create_test_dataset(db_session_with_containers, mock_external_service_dependencies, tenant.id) self._create_test_tag_bindings( - db_session_with_containers, mock_external_service_dependencies, [tag], dataset.id, tenant.id + db_session_with_containers, mock_external_service_dependencies, tags, dataset.id, tenant.id ) - # Verify binding exists before deletion - - binding_before = ( + # Verify bindings exist before deletion + bindings_before = ( db_session_with_containers.query(TagBinding) - 
.where(TagBinding.tag_id == tag.id, TagBinding.target_id == dataset.id) - .first() + .where(TagBinding.tag_id.in_([tag.id for tag in tags]), TagBinding.target_id == dataset.id) + .all() ) - assert binding_before is not None + assert len(bindings_before) == 2 # Act: Execute the method under test - delete_payload = TagBindingDeletePayload(type="knowledge", target_id=dataset.id, tag_id=tag.id) + delete_payload = TagBindingDeletePayload( + type="knowledge", target_id=dataset.id, tag_ids=[tag.id for tag in tags] + ) TagService.delete_tag_binding(delete_payload) # Assert: Verify the expected outcomes - # Verify tag binding was deleted - binding_after = ( + # Verify tag bindings were deleted + bindings_after = ( db_session_with_containers.query(TagBinding) - .where(TagBinding.tag_id == tag.id, TagBinding.target_id == dataset.id) - .first() + .where(TagBinding.tag_id.in_([tag.id for tag in tags]), TagBinding.target_id == dataset.id) + .all() ) - assert binding_after is None + assert len(bindings_after) == 0 def test_delete_tag_binding_non_existent_binding( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -1156,7 +1157,7 @@ class TestTagService: app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, tenant.id) # Act: Try to delete non-existent binding - delete_payload = TagBindingDeletePayload(type="app", target_id=app.id, tag_id=tag.id) + delete_payload = TagBindingDeletePayload(type="app", target_id=app.id, tag_ids=[tag.id]) TagService.delete_tag_binding(delete_payload) # Assert: Verify the expected outcomes diff --git a/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py b/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py index f2307fbd7d..797731d04b 100644 --- a/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py +++ 
b/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py @@ -6,7 +6,7 @@ from sqlalchemy import select from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom -from models import Account +from models import Account, App from models.enums import ConversationFromSource from models.model import Conversation, EndUser from models.web import PinnedConversation @@ -93,7 +93,7 @@ class TestWebConversationService: return app, account - def _create_test_end_user(self, db_session_with_containers: Session, app): + def _create_test_end_user(self, db_session_with_containers: Session, app: App): """ Helper method to create a test end user for testing. diff --git a/api/tests/test_containers_integration_tests/services/test_webhook_service.py b/api/tests/test_containers_integration_tests/services/test_webhook_service.py index 970da98c55..6d5c7380b7 100644 --- a/api/tests/test_containers_integration_tests/services/test_webhook_service.py +++ b/api/tests/test_containers_integration_tests/services/test_webhook_service.py @@ -5,6 +5,7 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from flask import Flask +from sqlalchemy.orm import Session from werkzeug.datastructures import FileStorage from models.enums import AppTriggerStatus, AppTriggerType @@ -52,7 +53,7 @@ class TestWebhookService: } @pytest.fixture - def test_data(self, db_session_with_containers, mock_external_dependencies): + def test_data(self, db_session_with_containers: Session, mock_external_dependencies): """Create test data for webhook service tests.""" fake = Faker() @@ -160,7 +161,7 @@ class TestWebhookService: "app_trigger": app_trigger, } - def test_get_webhook_trigger_and_workflow_success(self, test_data, flask_app_with_containers): + def test_get_webhook_trigger_and_workflow_success(self, test_data, flask_app_with_containers: Flask): """Test successful retrieval of webhook trigger and workflow.""" webhook_id = 
test_data["webhook_id"] @@ -175,7 +176,7 @@ class TestWebhookService: assert node_config["id"] == "webhook_node" assert node_config["data"].title == "Test Webhook" - def test_get_webhook_trigger_and_workflow_not_found(self, flask_app_with_containers): + def test_get_webhook_trigger_and_workflow_not_found(self, flask_app_with_containers: Flask): """Test webhook trigger not found scenario.""" with flask_app_with_containers.app_context(): with pytest.raises(ValueError, match="Webhook not found"): @@ -421,7 +422,9 @@ class TestWebhookService: assert result["files"] == {} - def test_trigger_workflow_execution_success(self, test_data, mock_external_dependencies, flask_app_with_containers): + def test_trigger_workflow_execution_success( + self, test_data, mock_external_dependencies, flask_app_with_containers: Flask + ): """Test successful workflow execution trigger.""" webhook_data = { "method": "POST", @@ -452,7 +455,7 @@ class TestWebhookService: mock_external_dependencies["async_service"].trigger_workflow_async.assert_called_once() def test_trigger_workflow_execution_end_user_service_failure( - self, test_data, mock_external_dependencies, flask_app_with_containers + self, test_data, mock_external_dependencies, flask_app_with_containers: Flask ): """Test workflow execution trigger when EndUserService fails.""" webhook_data = {"method": "POST", "headers": {}, "query_params": {}, "body": {}, "files": {}} diff --git a/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py b/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py index 85ce3a6ba6..69cde847f8 100644 --- a/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py +++ b/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py @@ -6,6 +6,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy import select 
from sqlalchemy.orm import Session @@ -165,7 +166,7 @@ class WebhookServiceRelationshipFactory: class TestWebhookServiceLookupWithContainers: def test_get_webhook_trigger_and_workflow_raises_when_app_trigger_missing( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -182,7 +183,7 @@ class TestWebhookServiceLookupWithContainers: WebhookService.get_webhook_trigger_and_workflow(webhook_trigger.webhook_id) def test_get_webhook_trigger_and_workflow_raises_when_app_trigger_rate_limited( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -202,7 +203,7 @@ class TestWebhookServiceLookupWithContainers: WebhookService.get_webhook_trigger_and_workflow(webhook_trigger.webhook_id) def test_get_webhook_trigger_and_workflow_raises_when_app_trigger_disabled( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -222,7 +223,7 @@ class TestWebhookServiceLookupWithContainers: WebhookService.get_webhook_trigger_and_workflow(webhook_trigger.webhook_id) def test_get_webhook_trigger_and_workflow_raises_when_workflow_missing( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -239,7 +240,7 @@ class TestWebhookServiceLookupWithContainers: WebhookService.get_webhook_trigger_and_workflow(webhook_trigger.webhook_id) def test_get_webhook_trigger_and_workflow_returns_debug_draft_workflow( - self, 
db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -275,7 +276,7 @@ class TestWebhookServiceLookupWithContainers: class TestWebhookServiceTriggerExecutionWithContainers: def test_trigger_workflow_execution_triggers_async_workflow_successfully( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -318,7 +319,7 @@ class TestWebhookServiceTriggerExecutionWithContainers: assert trigger_args[2].root_node_id == webhook_trigger.node_id def test_trigger_workflow_execution_marks_tenant_rate_limited_when_quota_exceeded( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -354,7 +355,7 @@ class TestWebhookServiceTriggerExecutionWithContainers: mock_mark_rate_limited.assert_called_once_with(tenant.id) def test_trigger_workflow_execution_logs_and_reraises_unexpected_errors( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -386,7 +387,7 @@ class TestWebhookServiceTriggerExecutionWithContainers: class TestWebhookServiceRelationshipSyncWithContainers: def test_sync_webhook_relationships_raises_when_workflow_exceeds_node_limit( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -401,7 +402,7 @@ class 
TestWebhookServiceRelationshipSyncWithContainers: WebhookService.sync_webhook_relationships(app, workflow) def test_sync_webhook_relationships_raises_when_lock_not_acquired( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -418,7 +419,7 @@ class TestWebhookServiceRelationshipSyncWithContainers: WebhookService.sync_webhook_relationships(app, workflow) def test_sync_webhook_relationships_creates_missing_records_and_deletes_stale_records( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -455,7 +456,7 @@ class TestWebhookServiceRelationshipSyncWithContainers: assert db_session_with_containers.get(WorkflowWebhookTrigger, stale_trigger_id) is None def test_sync_webhook_relationships_sets_redis_cache_for_new_record( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -481,7 +482,7 @@ class TestWebhookServiceRelationshipSyncWithContainers: assert cached_payload["webhook_id"] == "cache-webhook-id-00001" def test_sync_webhook_relationships_logs_when_lock_release_fails( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py index 1e57b5603d..a2cdddad61 100644 --- 
a/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py @@ -1530,7 +1530,7 @@ class TestWorkflowAppService: assert result_cross_tenant["total"] == 0 def test_get_paginate_workflow_app_logs_raises_when_account_filter_email_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) service = WorkflowAppService() @@ -1543,7 +1543,7 @@ class TestWorkflowAppService: ) def test_get_paginate_workflow_app_logs_filters_by_account( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) service = WorkflowAppService() @@ -1558,7 +1558,9 @@ class TestWorkflowAppService: assert result["total"] >= 0 assert isinstance(result["data"], list) - def test_get_paginate_workflow_archive_logs(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_paginate_workflow_archive_logs( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) service = WorkflowAppService() diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py index 86cf2327c7..82fe391b08 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py +++ 
b/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py @@ -45,7 +45,9 @@ class TestWorkflowDraftVariableService: # WorkflowDraftVariableService doesn't have external dependencies that need mocking return {} - def _create_test_app(self, db_session_with_containers: Session, mock_external_service_dependencies, fake=None): + def _create_test_app( + self, db_session_with_containers: Session, mock_external_service_dependencies, fake: Faker | None = None + ): """ Helper method to create a test app with realistic data for testing. @@ -80,7 +82,7 @@ class TestWorkflowDraftVariableService: db_session_with_containers.commit() return app - def _create_test_workflow(self, db_session_with_containers: Session, app, fake=None): + def _create_test_workflow(self, db_session_with_containers: Session, app, fake: Faker | None = None): """ Helper method to create a test workflow associated with an app. diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_service.py index b5ce8a53de..9ba1fda08b 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_service.py @@ -12,7 +12,7 @@ import pytest from faker import Faker from sqlalchemy.orm import Session -from models import Account, App, Workflow +from models import Account, AccountStatus, App, TenantStatus, Workflow from models.model import AppMode from models.workflow import WorkflowType from services.workflow_service import WorkflowService @@ -33,7 +33,7 @@ class TestWorkflowService: and realistic testing environment with actual database interactions. 
""" - def _create_test_account(self, db_session_with_containers: Session, fake=None): + def _create_test_account(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test account with realistic data. @@ -49,7 +49,7 @@ class TestWorkflowService: email=fake.email(), name=fake.name(), avatar=fake.url(), - status="active", + status=AccountStatus.ACTIVE, interface_language="en-US", # Set interface language for Site creation ) account.created_at = fake.date_time_this_year() @@ -62,7 +62,7 @@ class TestWorkflowService: tenant = Tenant( name=f"Test Tenant {fake.company()}", plan="basic", - status="normal", + status=TenantStatus.NORMAL, ) tenant.id = account.current_tenant_id tenant.created_at = fake.date_time_this_year() @@ -77,7 +77,7 @@ class TestWorkflowService: return account - def _create_test_app(self, db_session_with_containers: Session, fake=None): + def _create_test_app(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test app with realistic data. @@ -109,7 +109,7 @@ class TestWorkflowService: db_session_with_containers.commit() return app - def _create_test_workflow(self, db_session_with_containers: Session, app, account, fake=None): + def _create_test_workflow(self, db_session_with_containers: Session, app, account, fake: Faker | None = None): """ Helper method to create a test workflow associated with an app. 
diff --git a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_deletion.py b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_deletion.py index 29e1e240b4..afc4908c15 100644 --- a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_deletion.py +++ b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_deletion.py @@ -100,7 +100,7 @@ class TestWorkflowDeletion: session.flush() return provider - def test_delete_workflow_success(self, db_session_with_containers): + def test_delete_workflow_success(self, db_session_with_containers: Session): tenant, account = self._create_tenant_and_account(db_session_with_containers) app = self._create_app(db_session_with_containers, tenant=tenant, account=account) workflow = self._create_workflow( @@ -118,7 +118,7 @@ class TestWorkflowDeletion: db_session_with_containers.expire_all() assert db_session_with_containers.get(Workflow, workflow_id) is None - def test_delete_draft_workflow_raises_error(self, db_session_with_containers): + def test_delete_draft_workflow_raises_error(self, db_session_with_containers: Session): tenant, account = self._create_tenant_and_account(db_session_with_containers) app = self._create_app(db_session_with_containers, tenant=tenant, account=account) workflow = self._create_workflow( @@ -130,7 +130,7 @@ class TestWorkflowDeletion: with pytest.raises(DraftWorkflowDeletionError): service.delete_workflow(session=db_session_with_containers, workflow_id=workflow.id, tenant_id=tenant.id) - def test_delete_workflow_in_use_by_app_raises_error(self, db_session_with_containers): + def test_delete_workflow_in_use_by_app_raises_error(self, db_session_with_containers: Session): tenant, account = self._create_tenant_and_account(db_session_with_containers) app = self._create_app(db_session_with_containers, tenant=tenant, account=account) workflow = self._create_workflow( @@ -144,7 +144,7 @@ class TestWorkflowDeletion: with 
pytest.raises(WorkflowInUseError, match="currently in use by app"): service.delete_workflow(session=db_session_with_containers, workflow_id=workflow.id, tenant_id=tenant.id) - def test_delete_workflow_published_as_tool_raises_error(self, db_session_with_containers): + def test_delete_workflow_published_as_tool_raises_error(self, db_session_with_containers: Session): tenant, account = self._create_tenant_and_account(db_session_with_containers) app = self._create_app(db_session_with_containers, tenant=tenant, account=account) workflow = self._create_workflow( diff --git a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_node_execution_service_repository.py b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_node_execution_service_repository.py index 4dab895135..32b76c3469 100644 --- a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_node_execution_service_repository.py +++ b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_node_execution_service_repository.py @@ -64,7 +64,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: db_session_with_containers.commit() return execution - def test_get_node_last_execution_found(self, db_session_with_containers): + def test_get_node_last_execution_found(self, db_session_with_containers: Session): """Test getting the last execution for a node when it exists.""" # Arrange tenant_id = str(uuid4()) @@ -110,7 +110,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert result.id == expected.id assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED - def test_get_node_last_execution_not_found(self, db_session_with_containers): + def test_get_node_last_execution_not_found(self, db_session_with_containers: Session): """Test getting the last execution for a node when it doesn't exist.""" # Arrange tenant_id = str(uuid4()) @@ -129,7 +129,7 @@ class 
TestSQLAlchemyWorkflowNodeExecutionServiceRepository: # Assert assert result is None - def test_get_executions_by_workflow_run_empty(self, db_session_with_containers): + def test_get_executions_by_workflow_run_empty(self, db_session_with_containers: Session): """Test getting executions for a workflow run when none exist.""" # Arrange tenant_id = str(uuid4()) @@ -147,7 +147,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: # Assert assert result == [] - def test_get_execution_by_id_found(self, db_session_with_containers): + def test_get_execution_by_id_found(self, db_session_with_containers: Session): """Test getting execution by ID when it exists.""" # Arrange execution = self._create_execution( @@ -170,7 +170,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert result is not None assert result.id == execution.id - def test_get_execution_by_id_not_found(self, db_session_with_containers): + def test_get_execution_by_id_not_found(self, db_session_with_containers: Session): """Test getting execution by ID when it doesn't exist.""" # Arrange repository = self._create_repository(db_session_with_containers) @@ -182,7 +182,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: # Assert assert result is None - def test_delete_expired_executions(self, db_session_with_containers): + def test_delete_expired_executions(self, db_session_with_containers: Session): """Test deleting expired executions.""" # Arrange tenant_id = str(uuid4()) @@ -248,7 +248,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert old_execution_2_id not in remaining_ids assert kept_execution_id in remaining_ids - def test_delete_executions_by_app(self, db_session_with_containers): + def test_delete_executions_by_app(self, db_session_with_containers: Session): """Test deleting executions by app.""" # Arrange tenant_id = str(uuid4()) @@ -313,7 +313,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert deleted_2_id not in 
remaining_ids assert kept_id in remaining_ids - def test_get_expired_executions_batch(self, db_session_with_containers): + def test_get_expired_executions_batch(self, db_session_with_containers: Session): """Test getting expired executions batch for backup.""" # Arrange tenant_id = str(uuid4()) @@ -370,7 +370,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert old_execution_1.id in result_ids assert old_execution_2.id in result_ids - def test_delete_executions_by_ids(self, db_session_with_containers): + def test_delete_executions_by_ids(self, db_session_with_containers: Session): """Test deleting executions by IDs.""" # Arrange tenant_id = str(uuid4()) @@ -424,7 +424,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: ).all() assert remaining == [] - def test_delete_executions_by_ids_empty_list(self, db_session_with_containers): + def test_delete_executions_by_ids_empty_list(self, db_session_with_containers: Session): """Test deleting executions with empty ID list.""" # Arrange repository = self._create_repository(db_session_with_containers) diff --git a/api/tests/test_containers_integration_tests/tasks/test_clean_notion_document_task.py b/api/tests/test_containers_integration_tests/tasks/test_clean_notion_document_task.py index bf36da242b..d4e8b7819d 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_clean_notion_document_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_clean_notion_document_task.py @@ -62,7 +62,7 @@ class TestCleanNotionDocumentTask: yield mock_factory def test_clean_notion_document_task_success( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test successful cleanup of Notion documents with proper database operations. @@ -182,7 +182,7 @@ class TestCleanNotionDocumentTask: # 5. 
The task completes without errors def test_clean_notion_document_task_dataset_not_found( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task behavior when dataset is not found. @@ -202,7 +202,7 @@ class TestCleanNotionDocumentTask: mock_index_processor_factory.return_value.init_index_processor.assert_not_called() def test_clean_notion_document_task_empty_document_list( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task behavior with empty document list. @@ -246,7 +246,7 @@ class TestCleanNotionDocumentTask: assert args[1] == [] def test_clean_notion_document_task_with_different_index_types( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with different dataset index types. @@ -339,7 +339,7 @@ class TestCleanNotionDocumentTask: mock_index_processor_factory.reset_mock() def test_clean_notion_document_task_with_segments_no_index_node_ids( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with segments that have no index_node_ids. @@ -427,7 +427,7 @@ class TestCleanNotionDocumentTask: # are properly deleted from the database. 
def test_clean_notion_document_task_partial_document_cleanup( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with partial document cleanup scenario. @@ -556,7 +556,7 @@ class TestCleanNotionDocumentTask: # The database operations work correctly, isolating only the specified documents. def test_clean_notion_document_task_with_mixed_segment_statuses( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with segments in different statuses. @@ -656,7 +656,7 @@ class TestCleanNotionDocumentTask: # IndexProcessor verification would require more sophisticated mocking. def test_clean_notion_document_task_continues_when_index_processor_fails( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Index processor failure (e.g. transient billing API error propagated via @@ -760,7 +760,7 @@ class TestCleanNotionDocumentTask: assert _count_segments(db_session_with_containers, DocumentSegment.document_id == document.id) == 0 def test_clean_notion_document_task_with_large_number_of_documents( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with a large number of documents and segments. @@ -871,7 +871,7 @@ class TestCleanNotionDocumentTask: # The database efficiently handles large-scale deletions. 
def test_clean_notion_document_task_with_documents_from_different_tenants( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with documents from different tenants. @@ -1000,7 +1000,7 @@ class TestCleanNotionDocumentTask: # Only documents from the target dataset are affected, maintaining tenant separation. def test_clean_notion_document_task_with_documents_in_different_states( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with documents in different indexing states. @@ -1115,7 +1115,7 @@ class TestCleanNotionDocumentTask: # All documents are deleted regardless of their indexing status. def test_clean_notion_document_task_with_documents_having_metadata( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with documents that have rich metadata. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py index 9084667c31..a8d295e6a9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py @@ -12,10 +12,11 @@ from uuid import uuid4 import pytest from faker import Faker from sqlalchemy import delete +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from extensions.ext_redis import redis_client -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document, DocumentSegment from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus, SegmentStatus from tasks.create_segment_to_index_task import create_segment_to_index_task @@ -25,7 +26,7 @@ class TestCreateSegmentToIndexTask: """Integration tests for create_segment_to_index_task using testcontainers.""" @pytest.fixture(autouse=True) - def cleanup_database(self, db_session_with_containers): + def cleanup_database(self, db_session_with_containers: Session): """Clean up database and Redis before each test to ensure isolation.""" # Clear all test data using fixture session @@ -55,7 +56,7 @@ class TestCreateSegmentToIndexTask: "index_processor": mock_processor, } - def _create_test_account_and_tenant(self, db_session_with_containers): + def _create_test_account_and_tenant(self, db_session_with_containers: Session): """ Helper method to create a test account and tenant for testing. 
@@ -72,7 +73,7 @@ class TestCreateSegmentToIndexTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) @@ -81,7 +82,7 @@ class TestCreateSegmentToIndexTask: # Create tenant tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, plan="basic", ) db_session_with_containers.add(tenant) @@ -102,7 +103,7 @@ class TestCreateSegmentToIndexTask: return account, tenant - def _create_test_dataset_and_document(self, db_session_with_containers, tenant_id, account_id): + def _create_test_dataset_and_document(self, db_session_with_containers: Session, tenant_id, account_id): """ Helper method to create a test dataset and document for testing. @@ -151,7 +152,13 @@ class TestCreateSegmentToIndexTask: return dataset, document def _create_test_segment( - self, db_session_with_containers, dataset_id, document_id, tenant_id, account_id, status=SegmentStatus.WAITING + self, + db_session_with_containers: Session, + dataset_id, + document_id, + tenant_id, + account_id, + status=SegmentStatus.WAITING, ): """ Helper method to create a test document segment for testing. @@ -189,7 +196,9 @@ class TestCreateSegmentToIndexTask: return segment - def test_create_segment_to_index_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_create_segment_to_index_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful creation of segment to index. @@ -225,7 +234,7 @@ class TestCreateSegmentToIndexTask: assert redis_client.exists(cache_key) == 0 def test_create_segment_to_index_segment_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of non-existent segment ID. 
@@ -246,7 +255,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_invalid_status( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of segment with invalid status. @@ -277,7 +286,9 @@ class TestCreateSegmentToIndexTask: # Verify no index processor calls were made mock_external_service_dependencies["index_processor_factory"].assert_not_called() - def test_create_segment_to_index_no_dataset(self, db_session_with_containers, mock_external_service_dependencies): + def test_create_segment_to_index_no_dataset( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test handling of segment without associated dataset. @@ -330,7 +341,9 @@ class TestCreateSegmentToIndexTask: # Verify no index processor calls were made mock_external_service_dependencies["index_processor_factory"].assert_not_called() - def test_create_segment_to_index_no_document(self, db_session_with_containers, mock_external_service_dependencies): + def test_create_segment_to_index_no_document( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test handling of segment without associated document. @@ -367,7 +380,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_document_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of segment with disabled document. 
@@ -403,7 +416,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_document_archived( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of segment with archived document. @@ -439,7 +452,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_document_indexing_incomplete( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of segment with document that has incomplete indexing. @@ -475,7 +488,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_processor_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of index processor exceptions. @@ -511,7 +524,7 @@ class TestCreateSegmentToIndexTask: assert redis_client.exists(cache_key) == 0 def test_create_segment_to_index_with_keywords( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with custom keywords. @@ -543,7 +556,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor"].load.assert_called_once() def test_create_segment_to_index_different_doc_forms( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with different document forms. 
@@ -586,7 +599,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_called_with(doc_form) def test_create_segment_to_index_performance_timing( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing performance and timing. @@ -617,7 +630,7 @@ class TestCreateSegmentToIndexTask: assert segment.status == SegmentStatus.COMPLETED def test_create_segment_to_index_concurrent_execution( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test concurrent execution of segment indexing tasks. @@ -654,7 +667,7 @@ class TestCreateSegmentToIndexTask: assert mock_external_service_dependencies["index_processor_factory"].call_count == 3 def test_create_segment_to_index_large_content( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with large content. @@ -703,7 +716,7 @@ class TestCreateSegmentToIndexTask: assert segment.completed_at is not None def test_create_segment_to_index_redis_failure( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing when Redis operations fail. @@ -743,7 +756,7 @@ class TestCreateSegmentToIndexTask: assert redis_client.exists(cache_key) == 1 def test_create_segment_to_index_database_transaction_rollback( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with database transaction handling. 
@@ -775,7 +788,7 @@ class TestCreateSegmentToIndexTask: assert segment.error is not None def test_create_segment_to_index_metadata_validation( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with metadata validation. @@ -817,7 +830,7 @@ class TestCreateSegmentToIndexTask: assert doc is not None def test_create_segment_to_index_status_transition_flow( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test complete status transition flow during indexing. @@ -852,7 +865,7 @@ class TestCreateSegmentToIndexTask: assert segment.indexing_at <= segment.completed_at def test_create_segment_to_index_with_empty_content( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with empty or minimal content. @@ -894,7 +907,7 @@ class TestCreateSegmentToIndexTask: assert segment.completed_at is not None def test_create_segment_to_index_with_special_characters( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with special characters and unicode content. @@ -940,7 +953,7 @@ class TestCreateSegmentToIndexTask: assert segment.completed_at is not None def test_create_segment_to_index_with_long_keywords( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with long keyword lists. 
@@ -974,7 +987,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor"].load.assert_called_once() def test_create_segment_to_index_tenant_isolation( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with proper tenant isolation. @@ -1017,7 +1030,7 @@ class TestCreateSegmentToIndexTask: assert segment1.tenant_id != segment2.tenant_id def test_create_segment_to_index_with_none_keywords( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with None keywords parameter. @@ -1048,7 +1061,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor"].load.assert_called_once() def test_create_segment_to_index_comprehensive_integration( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Comprehensive integration test covering multiple scenarios. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py index 684097851b..5287cd06db 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py @@ -7,11 +7,12 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import select +from sqlalchemy.orm import Session from core.indexing_runner import DocumentIsPausedError from core.rag.index_processor.constant.index_type import IndexTechniqueType from enums.cloud_plan import CloudPlan -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus from tasks.document_indexing_task import ( @@ -53,7 +54,7 @@ class _TrackedSessionContext: @pytest.fixture(autouse=True) -def _ensure_testcontainers_db(db_session_with_containers): +def _ensure_testcontainers_db(db_session_with_containers: Session): """Ensure this suite always runs on testcontainers infrastructure.""" return db_session_with_containers @@ -120,12 +121,12 @@ class TestDatasetIndexingTaskIntegration: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.flush() - tenant = Tenant(name=fake.company(), status="normal") + tenant = Tenant(name=fake.company(), status=TenantStatus.NORMAL) db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -174,11 +175,11 @@ class TestDatasetIndexingTaskIntegration: return dataset, documents - def _query_document(self, db_session_with_containers, document_id: str) -> 
Document | None: + def _query_document(self, db_session_with_containers: Session, document_id: str) -> Document | None: """Return the latest persisted document state.""" return db_session_with_containers.scalar(select(Document).where(Document.id == document_id).limit(1)) - def _assert_documents_parsing(self, db_session_with_containers, document_ids: Sequence[str]) -> None: + def _assert_documents_parsing(self, db_session_with_containers: Session, document_ids: Sequence[str]) -> None: """Assert all target documents are persisted in parsing status.""" db_session_with_containers.expire_all() for document_id in document_ids: @@ -212,7 +213,9 @@ class TestDatasetIndexingTaskIntegration: assert len(opened) >= 2 assert opened_ids <= closed_ids - def test_legacy_document_indexing_task_still_works(self, db_session_with_containers, patched_external_dependencies): + def test_legacy_document_indexing_task_still_works( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Ensure the legacy task entrypoint still updates parsing status.""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=2) @@ -225,7 +228,9 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_called_once() self._assert_documents_parsing(db_session_with_containers, document_ids) - def test_batch_processing_multiple_documents(self, db_session_with_containers, patched_external_dependencies): + def test_batch_processing_multiple_documents( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Process multiple documents in one batch.""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=3) @@ -240,7 +245,9 @@ class TestDatasetIndexingTaskIntegration: assert len(run_args) == len(document_ids) self._assert_documents_parsing(db_session_with_containers, document_ids) - def 
test_batch_processing_with_limit_check(self, db_session_with_containers, patched_external_dependencies): + def test_batch_processing_with_limit_check( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Reject batches larger than configured upload limit. This test patches config only to force a deterministic limit branch while keeping SQL writes real. @@ -263,7 +270,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_error_contains(db_session_with_containers, document_ids, "batch upload limit") def test_batch_processing_sandbox_plan_single_document_only( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Reject multi-document upload under sandbox plan.""" # Arrange @@ -280,7 +287,9 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_not_called() self._assert_documents_error_contains(db_session_with_containers, document_ids, "does not support batch upload") - def test_batch_processing_empty_document_list(self, db_session_with_containers, patched_external_dependencies): + def test_batch_processing_empty_document_list( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Handle empty list input without failing.""" # Arrange dataset, _ = self._create_test_dataset_and_documents(db_session_with_containers, document_count=0) @@ -292,7 +301,7 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_called_once_with([]) def test_tenant_queue_dispatches_next_task_after_completion( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Dispatch the next queued task after current tenant task completes. 
@@ -337,7 +346,7 @@ class TestDatasetIndexingTaskIntegration: delete_key_spy.assert_not_called() def test_tenant_queue_deletes_running_key_when_no_follow_up_tasks( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Delete tenant running flag when queue has no pending tasks. @@ -362,7 +371,7 @@ class TestDatasetIndexingTaskIntegration: delete_key_spy.assert_called_once() def test_validation_failure_sets_error_status_when_vector_space_at_limit( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Set error status when vector space validation fails before runner phase.""" # Arrange @@ -382,7 +391,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_error_contains(db_session_with_containers, document_ids, "over the limit") def test_runner_exception_does_not_crash_indexing_task( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Catch generic runner exceptions without crashing the task.""" # Arrange @@ -397,7 +406,7 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_called_once() self._assert_documents_parsing(db_session_with_containers, document_ids) - def test_document_paused_error_handling(self, db_session_with_containers, patched_external_dependencies): + def test_document_paused_error_handling(self, db_session_with_containers: Session, patched_external_dependencies): """Handle DocumentIsPausedError and keep persisted state consistent.""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=2) @@ -424,7 +433,7 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_not_called() def 
test_tenant_queue_error_handling_still_processes_next_task( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Even on current task failure, enqueue the next waiting tenant task. @@ -491,7 +500,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_all_opened_sessions_closed(session_close_tracker) def test_multiple_documents_with_mixed_success_and_failure( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Process only existing documents when request includes missing ids.""" # Arrange @@ -508,7 +517,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_parsing(db_session_with_containers, existing_ids) def test_tenant_queue_dispatches_up_to_concurrency_limit( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Dispatch only up to configured concurrency under queued backlog burst. @@ -543,7 +552,7 @@ class TestDatasetIndexingTaskIntegration: assert task_dispatch_spy.apply_async.call_count == concurrency_limit assert set_waiting_spy.call_count == concurrency_limit - def test_task_queue_fifo_ordering(self, db_session_with_containers, patched_external_dependencies): + def test_task_queue_fifo_ordering(self, db_session_with_containers: Session, patched_external_dependencies): """Keep FIFO ordering when dispatching next queued tasks. Queue APIs are patched to isolate dispatch side effects while preserving DB assertions. 
@@ -576,7 +585,9 @@ class TestDatasetIndexingTaskIntegration: call_kwargs = task_dispatch_spy.apply_async.call_args_list[index].kwargs.get("kwargs", {}) assert call_kwargs.get("document_ids") == expected_task["document_ids"] - def test_billing_disabled_skips_limit_checks(self, db_session_with_containers, patched_external_dependencies): + def test_billing_disabled_skips_limit_checks( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Skip limit checks when billing feature is disabled.""" # Arrange large_document_ids = [str(uuid.uuid4()) for _ in range(100)] @@ -595,7 +606,7 @@ class TestDatasetIndexingTaskIntegration: assert len(run_args) == 100 self._assert_documents_parsing(db_session_with_containers, large_document_ids) - def test_complete_workflow_normal_task(self, db_session_with_containers, patched_external_dependencies): + def test_complete_workflow_normal_task(self, db_session_with_containers: Session, patched_external_dependencies): """Run end-to-end normal queue workflow with tenant queue cleanup. Queue APIs are patched to isolate dispatch side effects while preserving DB assertions. @@ -618,7 +629,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_parsing(db_session_with_containers, document_ids) delete_key_spy.assert_called_once() - def test_complete_workflow_priority_task(self, db_session_with_containers, patched_external_dependencies): + def test_complete_workflow_priority_task(self, db_session_with_containers: Session, patched_external_dependencies): """Run end-to-end priority queue workflow with tenant queue cleanup. Queue APIs are patched to isolate dispatch side effects while preserving DB assertions. 
@@ -641,7 +652,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_parsing(db_session_with_containers, document_ids) delete_key_spy.assert_called_once() - def test_single_document_processing(self, db_session_with_containers, patched_external_dependencies): + def test_single_document_processing(self, db_session_with_containers: Session, patched_external_dependencies): """Process the minimum batch size (single document).""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=1) @@ -655,7 +666,9 @@ class TestDatasetIndexingTaskIntegration: assert len(run_args) == 1 self._assert_documents_parsing(db_session_with_containers, [document_id]) - def test_document_with_special_characters_in_id(self, db_session_with_containers, patched_external_dependencies): + def test_document_with_special_characters_in_id( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Handle standard UUID ids with hyphen characters safely.""" # Arrange special_document_id = str(uuid.uuid4()) @@ -670,7 +683,9 @@ class TestDatasetIndexingTaskIntegration: # Assert self._assert_documents_parsing(db_session_with_containers, [special_document_id]) - def test_zero_vector_space_limit_allows_unlimited(self, db_session_with_containers, patched_external_dependencies): + def test_zero_vector_space_limit_allows_unlimited( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Treat vector limit 0 as unlimited and continue indexing.""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=3) @@ -689,7 +704,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_parsing(db_session_with_containers, document_ids) def test_negative_vector_space_values_handled_gracefully( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies 
): """Treat negative vector limits as non-blocking and continue indexing.""" # Arrange @@ -708,7 +723,7 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_called_once() self._assert_documents_parsing(db_session_with_containers, document_ids) - def test_large_document_batch_processing(self, db_session_with_containers, patched_external_dependencies): + def test_large_document_batch_processing(self, db_session_with_containers: Session, patched_external_dependencies): """Process a batch exactly at configured upload limit. This test patches config only to force a deterministic limit branch while keeping SQL writes real. diff --git a/api/tests/test_containers_integration_tests/tasks/test_deal_dataset_vector_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_deal_dataset_vector_index_task.py index 48fec441c5..e4cbb9e589 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_deal_dataset_vector_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_deal_dataset_vector_index_task.py @@ -12,6 +12,7 @@ from unittest.mock import ANY, Mock, patch import pytest from faker import Faker from sqlalchemy import select +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType from models.dataset import Dataset, Document, DocumentSegment @@ -55,7 +56,7 @@ class TestDealDatasetVectorIndexTask: yield mock_factory @pytest.fixture - def account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + def account_and_tenant(self, db_session_with_containers: Session, mock_external_service_dependencies): """Create an account with an owner tenant for testing. Returns a tuple of (account, tenant) where tenant is guaranteed to be non-None. 
@@ -73,7 +74,7 @@ class TestDealDatasetVectorIndexTask: return account, tenant def test_deal_dataset_vector_index_task_remove_action_success( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test successful removal of dataset vector index. @@ -131,7 +132,7 @@ class TestDealDatasetVectorIndexTask: assert mock_processor.clean.call_count >= 0 # For now, just check it doesn't fail def test_deal_dataset_vector_index_task_add_action_success( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test successful addition of dataset vector index. @@ -233,7 +234,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_update_action_success( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test successful update of dataset vector index. @@ -337,7 +338,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_dataset_not_found_error( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior when dataset is not found. 
@@ -357,7 +358,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_not_called() def test_deal_dataset_vector_index_task_add_action_no_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test add action when no documents exist for the dataset. @@ -389,7 +390,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_not_called() def test_deal_dataset_vector_index_task_add_action_no_segments( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test add action when documents exist but have no segments. @@ -447,7 +448,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_not_called() def test_deal_dataset_vector_index_task_update_action_no_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test update action when no documents exist for the dataset. @@ -480,7 +481,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_not_called() def test_deal_dataset_vector_index_task_add_action_with_exception_handling( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test add action with exception handling during processing. 
@@ -578,7 +579,7 @@ class TestDealDatasetVectorIndexTask: assert "Test exception during indexing" in updated_document.error def test_deal_dataset_vector_index_task_with_custom_index_type( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with custom index type (QA_INDEX). @@ -656,7 +657,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_with_default_index_type( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with default index type (PARAGRAPH_INDEX). @@ -734,7 +735,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_multiple_documents_processing( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task processing with multiple documents and segments. @@ -839,7 +840,7 @@ class TestDealDatasetVectorIndexTask: assert mock_processor.load.call_count == 3 def test_deal_dataset_vector_index_task_document_status_transitions( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test document status transitions during task execution. 
@@ -938,7 +939,7 @@ class TestDealDatasetVectorIndexTask: assert updated_document.indexing_status == IndexingStatus.COMPLETED def test_deal_dataset_vector_index_task_with_disabled_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with disabled documents. @@ -1061,7 +1062,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_with_archived_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with archived documents. @@ -1184,7 +1185,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_with_incomplete_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with documents that have incomplete indexing status. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py index 8a69707b38..f4a71040c1 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py @@ -11,9 +11,19 @@ import logging from unittest.mock import MagicMock, patch from faker import Faker +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from models import Account, Dataset, Document, DocumentSegment, Tenant +from models import ( + Account, + AccountStatus, + Dataset, + DatasetPermissionEnum, + Document, + DocumentSegment, + Tenant, + TenantStatus, +) from models.enums import DataSourceType, DocumentCreatedFrom, DocumentDocType, IndexingStatus, SegmentStatus from tasks.delete_segment_from_index_task import delete_segment_from_index_task @@ -37,7 +47,7 @@ class TestDeleteSegmentFromIndexTask: and realistic testing environment with actual database interactions. """ - def _create_test_tenant(self, db_session_with_containers, fake=None): + def _create_test_tenant(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test tenant with realistic data. 
@@ -49,7 +59,7 @@ class TestDeleteSegmentFromIndexTask: Tenant: Created test tenant instance """ fake = fake or Faker() - tenant = Tenant(name=f"Test Tenant {fake.company()}", plan="basic", status="normal") + tenant = Tenant(name=f"Test Tenant {fake.company()}", plan="basic", status=TenantStatus.NORMAL) tenant.id = fake.uuid4() tenant.created_at = fake.date_time_this_year() tenant.updated_at = tenant.created_at @@ -58,7 +68,7 @@ class TestDeleteSegmentFromIndexTask: db_session_with_containers.commit() return tenant - def _create_test_account(self, db_session_with_containers, tenant, fake=None): + def _create_test_account(self, db_session_with_containers: Session, tenant, fake: Faker | None = None): """ Helper method to create a test account with realistic data. @@ -75,7 +85,7 @@ class TestDeleteSegmentFromIndexTask: name=fake.name(), email=fake.email(), avatar=fake.url(), - status="active", + status=AccountStatus.ACTIVE, interface_language="en-US", ) account.id = fake.uuid4() @@ -86,7 +96,9 @@ class TestDeleteSegmentFromIndexTask: db_session_with_containers.commit() return account - def _create_test_dataset(self, db_session_with_containers, tenant, account, fake=None): + def _create_test_dataset( + self, db_session_with_containers: Session, tenant: Tenant, account: Account, fake: Faker | None = None + ): """ Helper method to create a test dataset with realistic data. 
@@ -106,7 +118,7 @@ class TestDeleteSegmentFromIndexTask: dataset.name = f"Test Dataset {fake.word()}" dataset.description = fake.text(max_nb_chars=200) dataset.provider = "vendor" - dataset.permission = "only_me" + dataset.permission = DatasetPermissionEnum.ONLY_ME dataset.data_source_type = DataSourceType.UPLOAD_FILE dataset.indexing_technique = IndexTechniqueType.HIGH_QUALITY dataset.index_struct = '{"type": "paragraph"}' @@ -122,7 +134,7 @@ class TestDeleteSegmentFromIndexTask: db_session_with_containers.commit() return dataset - def _create_test_document(self, db_session_with_containers, dataset, account, fake=None, **kwargs): + def _create_test_document(self, db_session_with_containers: Session, dataset, account, fake=None, **kwargs): """ Helper method to create a test document with realistic data. @@ -172,7 +184,14 @@ class TestDeleteSegmentFromIndexTask: db_session_with_containers.commit() return document - def _create_test_document_segments(self, db_session_with_containers, document, account, count=3, fake=None): + def _create_test_document_segments( + self, + db_session_with_containers: Session, + document: Document, + account: Account, + count: int = 3, + fake: Faker | None = None, + ): """ Helper method to create test document segments with realistic data. @@ -218,7 +237,9 @@ class TestDeleteSegmentFromIndexTask: return segments @patch("tasks.delete_segment_from_index_task.IndexProcessorFactory", autospec=True) - def test_delete_segment_from_index_task_success(self, mock_index_processor_factory, db_session_with_containers): + def test_delete_segment_from_index_task_success( + self, mock_index_processor_factory, db_session_with_containers: Session + ): """ Test successful segment deletion from index with comprehensive verification. 
@@ -267,7 +288,7 @@ class TestDeleteSegmentFromIndexTask: assert call_args[1]["with_keywords"] is True assert call_args[1]["delete_child_chunks"] is True - def test_delete_segment_from_index_task_dataset_not_found(self, db_session_with_containers): + def test_delete_segment_from_index_task_dataset_not_found(self, db_session_with_containers: Session): """ Test task behavior when dataset is not found. @@ -288,7 +309,7 @@ class TestDeleteSegmentFromIndexTask: # Verify the task completed without exceptions assert result is None # Task should return None when dataset not found - def test_delete_segment_from_index_task_document_not_found(self, db_session_with_containers): + def test_delete_segment_from_index_task_document_not_found(self, db_session_with_containers: Session): """ Test task behavior when document is not found. @@ -314,7 +335,7 @@ class TestDeleteSegmentFromIndexTask: # Verify the task completed without exceptions assert result is None # Task should return None when document not found - def test_delete_segment_from_index_task_document_disabled(self, db_session_with_containers): + def test_delete_segment_from_index_task_document_disabled(self, db_session_with_containers: Session): """ Test task behavior when document is disabled. @@ -342,7 +363,7 @@ class TestDeleteSegmentFromIndexTask: # Verify the task completed without exceptions assert result is None # Task should return None when document is disabled - def test_delete_segment_from_index_task_document_archived(self, db_session_with_containers): + def test_delete_segment_from_index_task_document_archived(self, db_session_with_containers: Session): """ Test task behavior when document is archived. 
@@ -370,7 +391,7 @@ class TestDeleteSegmentFromIndexTask: # Verify the task completed without exceptions assert result is None # Task should return None when document is archived - def test_delete_segment_from_index_task_document_not_completed(self, db_session_with_containers): + def test_delete_segment_from_index_task_document_not_completed(self, db_session_with_containers: Session): """ Test task behavior when document indexing is not completed. diff --git a/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py index 6e03bd9351..6bfb1e1f1e 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py @@ -13,7 +13,7 @@ from sqlalchemy import select from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from models import Account, Dataset, DocumentSegment +from models import Account, AccountStatus, Dataset, DocumentSegment, TenantAccountRole, TenantStatus from models import Document as DatasetDocument from models.dataset import DatasetProcessRule from models.enums import DataSourceType, DocumentCreatedFrom, ProcessRuleMode, SegmentStatus @@ -35,7 +35,7 @@ class TestDisableSegmentsFromIndexTask: and realistic testing environment with actual database interactions. """ - def _create_test_account(self, db_session_with_containers: Session, fake=None): + def _create_test_account(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test account with realistic data. 
@@ -51,24 +51,23 @@ class TestDisableSegmentsFromIndexTask: email=fake.email(), name=fake.name(), avatar=fake.url(), - status="active", + status=AccountStatus.ACTIVE, interface_language="en-US", ) - account.id = fake.uuid4() # monkey-patch attributes for test setup + account.updated_at = fake.date_time_this_year() + account.created_at = fake.date_time_this_year() + account.role = TenantAccountRole.OWNER + account.id = fake.uuid4() account.tenant_id = fake.uuid4() account.type = "normal" - account.role = "owner" - account.created_at = fake.date_time_this_year() - account.updated_at = account.created_at - # Create a tenant for the account from models.account import Tenant tenant = Tenant( name=f"Test Tenant {fake.company()}", plan="basic", - status="normal", + status=TenantStatus.NORMAL, ) tenant.id = account.tenant_id tenant.created_at = fake.date_time_this_year() @@ -83,7 +82,7 @@ class TestDisableSegmentsFromIndexTask: return account - def _create_test_dataset(self, db_session_with_containers: Session, account, fake=None): + def _create_test_dataset(self, db_session_with_containers: Session, account, fake: Faker | None = None): """ Helper method to create a test dataset with realistic data. @@ -117,7 +116,9 @@ class TestDisableSegmentsFromIndexTask: return dataset - def _create_test_document(self, db_session_with_containers: Session, dataset, account, fake=None): + def _create_test_document( + self, db_session_with_containers: Session, dataset, account: Account, fake: Faker | None = None + ): """ Helper method to create a test document with realistic data. @@ -216,7 +217,7 @@ class TestDisableSegmentsFromIndexTask: return segments - def _create_dataset_process_rule(self, db_session_with_containers: Session, dataset, fake=None): + def _create_dataset_process_rule(self, db_session_with_containers: Session, dataset, fake: Faker | None = None): """ Helper method to create a dataset process rule. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_sync_task.py b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_sync_task.py index b6e7e6e5c9..77cd259833 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_sync_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_sync_task.py @@ -13,6 +13,7 @@ from uuid import uuid4 import pytest from sqlalchemy import delete, func, select, update +from sqlalchemy.orm import Session from core.indexing_runner import DocumentIsPausedError, IndexingRunner from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType @@ -162,7 +163,7 @@ class TestDocumentIndexingSyncTask: "indexing_runner": indexing_runner, } - def _create_notion_sync_context(self, db_session_with_containers, *, data_source_info: dict | None = None): + def _create_notion_sync_context(self, db_session_with_containers: Session, *, data_source_info: dict | None = None): account, tenant = DocumentIndexingSyncTaskTestDataFactory.create_account_with_tenant(db_session_with_containers) dataset = DocumentIndexingSyncTaskTestDataFactory.create_dataset( db_session_with_containers, @@ -206,7 +207,7 @@ class TestDocumentIndexingSyncTask: "notion_info": notion_info, } - def test_document_not_found(self, db_session_with_containers, mock_external_dependencies): + def test_document_not_found(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task handles missing document gracefully.""" # Arrange dataset_id = str(uuid4()) @@ -219,7 +220,7 @@ class TestDocumentIndexingSyncTask: mock_external_dependencies["datasource_service"].get_datasource_credentials.assert_not_called() mock_external_dependencies["indexing_runner"].run.assert_not_called() - def test_missing_notion_workspace_id(self, db_session_with_containers, mock_external_dependencies): + def test_missing_notion_workspace_id(self, 
db_session_with_containers: Session, mock_external_dependencies): """Test that task raises error when notion_workspace_id is missing.""" # Arrange context = self._create_notion_sync_context( @@ -235,7 +236,7 @@ class TestDocumentIndexingSyncTask: with pytest.raises(ValueError, match="no notion page found"): document_indexing_sync_task(context["dataset"].id, context["document"].id) - def test_missing_notion_page_id(self, db_session_with_containers, mock_external_dependencies): + def test_missing_notion_page_id(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task raises error when notion_page_id is missing.""" # Arrange context = self._create_notion_sync_context( @@ -251,7 +252,7 @@ class TestDocumentIndexingSyncTask: with pytest.raises(ValueError, match="no notion page found"): document_indexing_sync_task(context["dataset"].id, context["document"].id) - def test_empty_data_source_info(self, db_session_with_containers, mock_external_dependencies): + def test_empty_data_source_info(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task raises error when data_source_info is empty.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers, data_source_info=None) @@ -264,7 +265,7 @@ class TestDocumentIndexingSyncTask: with pytest.raises(ValueError, match="no notion page found"): document_indexing_sync_task(context["dataset"].id, context["document"].id) - def test_credential_not_found(self, db_session_with_containers, mock_external_dependencies): + def test_credential_not_found(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task sets document error state when credential is missing.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -284,7 +285,7 @@ class TestDocumentIndexingSyncTask: assert updated_document.stopped_at is not None mock_external_dependencies["indexing_runner"].run.assert_not_called() - def 
test_page_not_updated(self, db_session_with_containers, mock_external_dependencies): + def test_page_not_updated(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task exits early when notion page is unchanged.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -310,7 +311,7 @@ class TestDocumentIndexingSyncTask: mock_external_dependencies["index_processor"].clean.assert_not_called() mock_external_dependencies["indexing_runner"].run.assert_not_called() - def test_successful_sync_when_page_updated(self, db_session_with_containers, mock_external_dependencies): + def test_successful_sync_when_page_updated(self, db_session_with_containers: Session, mock_external_dependencies): """Test full successful sync flow with SQL state updates and side effects.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -349,7 +350,7 @@ class TestDocumentIndexingSyncTask: assert len(run_documents) == 1 assert getattr(run_documents[0], "id", None) == context["document"].id - def test_dataset_not_found_during_cleaning(self, db_session_with_containers, mock_external_dependencies): + def test_dataset_not_found_during_cleaning(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task still updates document and reindexes if dataset vanishes before clean.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -376,7 +377,9 @@ class TestDocumentIndexingSyncTask: mock_external_dependencies["index_processor"].clean.assert_not_called() mock_external_dependencies["indexing_runner"].run.assert_called_once() - def test_cleaning_error_continues_to_indexing(self, db_session_with_containers, mock_external_dependencies): + def test_cleaning_error_continues_to_indexing( + self, db_session_with_containers: Session, mock_external_dependencies + ): """Test that indexing continues when index cleanup fails.""" # Arrange context = 
self._create_notion_sync_context(db_session_with_containers) @@ -400,7 +403,9 @@ class TestDocumentIndexingSyncTask: assert remaining_segments == 0 mock_external_dependencies["indexing_runner"].run.assert_called_once() - def test_indexing_runner_document_paused_error(self, db_session_with_containers, mock_external_dependencies): + def test_indexing_runner_document_paused_error( + self, db_session_with_containers: Session, mock_external_dependencies + ): """Test that DocumentIsPausedError does not flip document into error state.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -418,7 +423,7 @@ class TestDocumentIndexingSyncTask: assert updated_document.indexing_status == IndexingStatus.PARSING assert updated_document.error is None - def test_indexing_runner_general_error(self, db_session_with_containers, mock_external_dependencies): + def test_indexing_runner_general_error(self, db_session_with_containers: Session, mock_external_dependencies): """Test that indexing errors are persisted to document state.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) diff --git a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py index cf1a8666f3..6c1454b6d8 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py @@ -3,11 +3,12 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker +from sqlalchemy.orm import Session from core.entities.document_task import DocumentTask from core.rag.index_processor.constant.index_type import IndexTechniqueType from enums.cloud_plan import CloudPlan -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, 
TenantStatus from models.dataset import Dataset, Document from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus from tasks.document_indexing_task import ( @@ -51,7 +52,7 @@ class TestDocumentIndexingTasks: } def _create_test_dataset_and_documents( - self, db_session_with_containers, mock_external_service_dependencies, document_count=3 + self, db_session_with_containers: Session, mock_external_service_dependencies, document_count=3 ): """ Helper method to create a test dataset and documents for testing. @@ -71,14 +72,14 @@ class TestDocumentIndexingTasks: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.commit() tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() @@ -133,7 +134,7 @@ class TestDocumentIndexingTasks: return dataset, documents def _create_test_dataset_with_billing_features( - self, db_session_with_containers, mock_external_service_dependencies, billing_enabled=True + self, db_session_with_containers: Session, mock_external_service_dependencies, billing_enabled=True ): """ Helper method to create a test dataset with billing features configured. 
@@ -153,14 +154,14 @@ class TestDocumentIndexingTasks: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.commit() tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() @@ -221,7 +222,9 @@ class TestDocumentIndexingTasks: return dataset, documents - def test_document_indexing_task_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_document_indexing_task_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful document indexing with multiple documents. @@ -262,7 +265,7 @@ class TestDocumentIndexingTasks: assert len(processed_documents) == 3 def test_document_indexing_task_dataset_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of non-existent dataset. @@ -286,7 +289,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_not_called() def test_document_indexing_task_document_not_found_in_dataset( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling when some documents don't exist in the dataset. @@ -332,7 +335,7 @@ class TestDocumentIndexingTasks: assert len(processed_documents) == 2 # Only existing documents def test_document_indexing_task_indexing_runner_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of IndexingRunner exceptions. 
@@ -373,7 +376,7 @@ class TestDocumentIndexingTasks: assert updated_document.processing_started_at is not None def test_document_indexing_task_mixed_document_states( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test processing documents with mixed initial states. @@ -456,7 +459,7 @@ class TestDocumentIndexingTasks: assert len(processed_documents) == 4 def test_document_indexing_task_billing_sandbox_plan_batch_limit( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test billing validation for sandbox plan batch upload limit. @@ -518,7 +521,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner"].assert_not_called() def test_document_indexing_task_billing_disabled_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful processing when billing is disabled. @@ -554,7 +557,7 @@ class TestDocumentIndexingTasks: assert updated_document.processing_started_at is not None def test_document_indexing_task_document_is_paused_error( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of DocumentIsPausedError from IndexingRunner. 
@@ -597,7 +600,9 @@ class TestDocumentIndexingTasks: assert updated_document.processing_started_at is not None # ==================== NEW TESTS FOR REFACTORED FUNCTIONS ==================== - def test_old_document_indexing_task_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_old_document_indexing_task_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test document_indexing_task basic functionality. @@ -619,7 +624,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def test_normal_document_indexing_task_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test normal_document_indexing_task basic functionality. @@ -643,7 +648,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def test_priority_document_indexing_task_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test priority_document_indexing_task basic functionality. @@ -667,7 +672,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def test_document_indexing_with_tenant_queue_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test _document_indexing_with_tenant_queue function with no waiting tasks. 
@@ -717,7 +722,7 @@ class TestDocumentIndexingTasks: mock_task_func.delay.assert_not_called() def test_document_indexing_with_tenant_queue_with_waiting_tasks( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test _document_indexing_with_tenant_queue function with waiting tasks in queue using real Redis. @@ -776,7 +781,7 @@ class TestDocumentIndexingTasks: assert len(remaining_tasks) == 1 def test_document_indexing_with_tenant_queue_error_handling( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test error handling in _document_indexing_with_tenant_queue using real Redis. @@ -848,7 +853,7 @@ class TestDocumentIndexingTasks: assert len(remaining_tasks) == 0 def test_document_indexing_with_tenant_queue_tenant_isolation( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test tenant isolation in _document_indexing_with_tenant_queue using real Redis. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_update_task.py b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_update_task.py index a9a8c0f30c..208fc1aa1d 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_update_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_update_task.py @@ -3,9 +3,10 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import func, select +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document, DocumentSegment from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus, SegmentStatus from tasks.document_indexing_update_task import document_indexing_update_task @@ -33,7 +34,7 @@ class TestDocumentIndexingUpdateTask: "runner_instance": runner_instance, } - def _create_dataset_document_with_segments(self, db_session_with_containers, *, segment_count: int = 2): + def _create_dataset_document_with_segments(self, db_session_with_containers: Session, *, segment_count: int = 2): fake = Faker() # Account and tenant @@ -41,12 +42,12 @@ class TestDocumentIndexingUpdateTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.commit() - tenant = Tenant(name=fake.company(), status="normal") + tenant = Tenant(name=fake.company(), status=TenantStatus.NORMAL) db_session_with_containers.add(tenant) db_session_with_containers.commit() @@ -114,7 +115,7 @@ class TestDocumentIndexingUpdateTask: return dataset, document, node_ids - def 
test_cleans_segments_and_reindexes(self, db_session_with_containers, mock_external_dependencies): + def test_cleans_segments_and_reindexes(self, db_session_with_containers: Session, mock_external_dependencies): dataset, document, node_ids = self._create_dataset_document_with_segments(db_session_with_containers) # Act @@ -153,7 +154,9 @@ class TestDocumentIndexingUpdateTask: first = run_docs[0] assert getattr(first, "id", None) == document.id - def test_clean_error_is_logged_and_indexing_continues(self, db_session_with_containers, mock_external_dependencies): + def test_clean_error_is_logged_and_indexing_continues( + self, db_session_with_containers: Session, mock_external_dependencies + ): dataset, document, node_ids = self._create_dataset_document_with_segments(db_session_with_containers) # Force clean to raise; task should continue to indexing @@ -173,7 +176,7 @@ class TestDocumentIndexingUpdateTask: ) assert remaining > 0 - def test_document_not_found_noop(self, db_session_with_containers, mock_external_dependencies): + def test_document_not_found_noop(self, db_session_with_containers: Session, mock_external_dependencies): fake = Faker() # Act with non-existent document id document_indexing_update_task(dataset_id=fake.uuid4(), document_id=fake.uuid4()) diff --git a/api/tests/test_containers_integration_tests/tasks/test_duplicate_document_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_duplicate_document_indexing_task.py index 39c58987fd..12440f3e6b 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_duplicate_document_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_duplicate_document_indexing_task.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import select +from sqlalchemy.orm import Session from core.indexing_runner import DocumentIsPausedError from core.rag.index_processor.constant.index_type import IndexStructureType, 
IndexTechniqueType @@ -62,7 +63,7 @@ class TestDuplicateDocumentIndexingTasks: } def _create_test_dataset_and_documents( - self, db_session_with_containers, mock_external_service_dependencies, document_count=3 + self, db_session_with_containers: Session, mock_external_service_dependencies, document_count=3 ): """ Helper method to create a test dataset and documents for testing. @@ -145,7 +146,11 @@ class TestDuplicateDocumentIndexingTasks: return dataset, documents def _create_test_dataset_with_segments( - self, db_session_with_containers, mock_external_service_dependencies, document_count=3, segments_per_doc=2 + self, + db_session_with_containers: Session, + mock_external_service_dependencies, + document_count=3, + segments_per_doc=2, ): """ Helper method to create a test dataset with documents and segments. @@ -197,7 +202,7 @@ class TestDuplicateDocumentIndexingTasks: return dataset, documents, segments def _create_test_dataset_with_billing_features( - self, db_session_with_containers, mock_external_service_dependencies, billing_enabled=True + self, db_session_with_containers: Session, mock_external_service_dependencies, billing_enabled=True ): """ Helper method to create a test dataset with billing features configured. @@ -287,7 +292,7 @@ class TestDuplicateDocumentIndexingTasks: return dataset, documents def _test_duplicate_document_indexing_task_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful duplicate document indexing with multiple documents. 
@@ -329,7 +334,7 @@ class TestDuplicateDocumentIndexingTasks: assert len(processed_documents) == 3 def _test_duplicate_document_indexing_task_with_segment_cleanup( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test duplicate document indexing with existing segments that need cleanup. @@ -379,7 +384,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def _test_duplicate_document_indexing_task_dataset_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of non-existent dataset. @@ -404,7 +409,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["index_processor"].clean.assert_not_called() def test_duplicate_document_indexing_task_document_not_found_in_dataset( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling when some documents don't exist in the dataset. @@ -450,7 +455,7 @@ class TestDuplicateDocumentIndexingTasks: assert len(processed_documents) == 2 # Only existing documents def _test_duplicate_document_indexing_task_indexing_runner_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of IndexingRunner exceptions. 
@@ -491,7 +496,7 @@ class TestDuplicateDocumentIndexingTasks: assert updated_document.processing_started_at is not None def _test_duplicate_document_indexing_task_billing_sandbox_plan_batch_limit( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test billing validation for sandbox plan batch upload limit. @@ -554,7 +559,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_not_called() def _test_duplicate_document_indexing_task_billing_vector_space_limit_exceeded( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test billing validation for vector space limit. @@ -596,7 +601,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_not_called() def test_duplicate_document_indexing_task_with_empty_document_list( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of empty document list. @@ -622,7 +627,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once_with([]) def test_deprecated_duplicate_document_indexing_task_delegates_to_core( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test that deprecated duplicate_document_indexing_task delegates to core function. 
@@ -655,7 +660,7 @@ class TestDuplicateDocumentIndexingTasks: @patch("tasks.duplicate_document_indexing_task.TenantIsolatedTaskQueue", autospec=True) def test_normal_duplicate_document_indexing_task_with_tenant_queue( - self, mock_queue_class, db_session_with_containers, mock_external_service_dependencies + self, mock_queue_class, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test normal_duplicate_document_indexing_task with tenant isolation queue. @@ -698,7 +703,7 @@ class TestDuplicateDocumentIndexingTasks: @patch("tasks.duplicate_document_indexing_task.TenantIsolatedTaskQueue", autospec=True) def test_priority_duplicate_document_indexing_task_with_tenant_queue( - self, mock_queue_class, db_session_with_containers, mock_external_service_dependencies + self, mock_queue_class, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test priority_duplicate_document_indexing_task with tenant isolation queue. @@ -742,7 +747,7 @@ class TestDuplicateDocumentIndexingTasks: @patch("tasks.duplicate_document_indexing_task.TenantIsolatedTaskQueue", autospec=True) def test_tenant_queue_wrapper_processes_next_tasks( - self, mock_queue_class, db_session_with_containers, mock_external_service_dependencies + self, mock_queue_class, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test tenant queue wrapper processes next queued tasks. 
@@ -789,7 +794,7 @@ class TestDuplicateDocumentIndexingTasks: mock_queue.delete_task_key.assert_not_called() def test_successful_duplicate_document_indexing( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test successful duplicate document indexing flow.""" self._test_duplicate_document_indexing_task_success( @@ -797,7 +802,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_dataset_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing when dataset is not found.""" self._test_duplicate_document_indexing_task_dataset_not_found( @@ -805,7 +810,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_with_billing_enabled_sandbox_plan( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing with billing enabled and sandbox plan.""" self._test_duplicate_document_indexing_task_billing_sandbox_plan_batch_limit( @@ -813,7 +818,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_with_billing_limit_exceeded( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing when billing limit is exceeded.""" self._test_duplicate_document_indexing_task_billing_vector_space_limit_exceeded( @@ -821,7 +826,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_runner_error( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate 
document indexing when IndexingRunner raises an error.""" self._test_duplicate_document_indexing_task_indexing_runner_exception( @@ -829,7 +834,7 @@ class TestDuplicateDocumentIndexingTasks: ) def _test_duplicate_document_indexing_task_document_is_paused( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing when document is paused.""" # Arrange @@ -860,7 +865,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def test_duplicate_document_indexing_document_is_paused( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing when document is paused.""" self._test_duplicate_document_indexing_task_document_is_paused( @@ -868,7 +873,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_cleans_old_segments( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test that duplicate document indexing cleans old segments.""" self._test_duplicate_document_indexing_task_with_segment_cleanup( diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py index ff72232d12..c4895839c9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py @@ -5,6 +5,7 @@ from faker import Faker from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import TenantStatus from models.account import Account, Tenant, TenantAccountJoin, 
TenantAccountRole from tasks.mail_account_deletion_task import send_account_deletion_verification_code, send_deletion_success_task @@ -55,7 +56,7 @@ class TestMailAccountDeletionTask: # Create tenant tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py index 177af266fb..a697878bb6 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py @@ -2,6 +2,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from libs.email_i18n import EmailType from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole @@ -29,7 +30,7 @@ class TestMailChangeMailTask: "get_email_i18n_service": mock_get_email_i18n_service, } - def _create_test_account(self, db_session_with_containers): + def _create_test_account(self, db_session_with_containers: Session): """ Helper method to create a test account for testing. @@ -72,7 +73,7 @@ class TestMailChangeMailTask: return account def test_send_change_mail_task_success_old_email_phase( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful change email task execution for old_email phase. @@ -103,7 +104,7 @@ class TestMailChangeMailTask: ) def test_send_change_mail_task_success_new_email_phase( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful change email task execution for new_email phase. 
@@ -134,7 +135,7 @@ class TestMailChangeMailTask: ) def test_send_change_mail_task_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test change email task when mail service is not initialized. @@ -159,7 +160,7 @@ class TestMailChangeMailTask: mock_external_service_dependencies["email_i18n_service"].send_change_email.assert_not_called() def test_send_change_mail_task_email_service_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test change email task when email service raises an exception. @@ -191,7 +192,7 @@ class TestMailChangeMailTask: ) def test_send_change_mail_completed_notification_task_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful change email completed notification task execution. @@ -224,7 +225,7 @@ class TestMailChangeMailTask: ) def test_send_change_mail_completed_notification_task_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test change email completed notification task when mail service is not initialized. @@ -247,7 +248,7 @@ class TestMailChangeMailTask: mock_external_service_dependencies["email_i18n_service"].send_email.assert_not_called() def test_send_change_mail_completed_notification_task_email_service_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test change email completed notification task when email service raises an exception. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py index 8343711998..0eec166fe2 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py @@ -15,8 +15,10 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import delete +from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import AccountStatus, TenantStatus from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from tasks.mail_email_code_login import send_email_code_login_mail_task @@ -37,7 +39,7 @@ class TestSendEmailCodeLoginMailTask: """ @pytest.fixture(autouse=True) - def cleanup_database(self, db_session_with_containers): + def cleanup_database(self, db_session_with_containers: Session): """Clean up database before each test to ensure isolation.""" from extensions.ext_redis import redis_client @@ -71,7 +73,7 @@ class TestSendEmailCodeLoginMailTask: "email_service_instance": mock_email_service_instance, } - def _create_test_account(self, db_session_with_containers, fake=None): + def _create_test_account(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test account for testing. 
@@ -90,7 +92,7 @@ class TestSendEmailCodeLoginMailTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) @@ -98,7 +100,7 @@ class TestSendEmailCodeLoginMailTask: return account - def _create_test_tenant_and_account(self, db_session_with_containers, fake=None): + def _create_test_tenant_and_account(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test tenant and account for testing. @@ -119,7 +121,7 @@ class TestSendEmailCodeLoginMailTask: tenant = Tenant( name=fake.company(), plan="basic", - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) @@ -138,7 +140,7 @@ class TestSendEmailCodeLoginMailTask: return account, tenant def test_send_email_code_login_mail_task_success_english( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful email code login mail sending in English. @@ -182,7 +184,7 @@ class TestSendEmailCodeLoginMailTask: ) def test_send_email_code_login_mail_task_success_chinese( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful email code login mail sending in Chinese. @@ -221,7 +223,7 @@ class TestSendEmailCodeLoginMailTask: ) def test_send_email_code_login_mail_task_success_multiple_languages( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful email code login mail sending with multiple languages. 
@@ -261,7 +263,7 @@ class TestSendEmailCodeLoginMailTask: assert call_args[1]["template_context"]["code"] == test_codes[i] def test_send_email_code_login_mail_task_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task when mail service is not initialized. @@ -299,7 +301,7 @@ class TestSendEmailCodeLoginMailTask: mock_email_service_instance.send_email.assert_not_called() def test_send_email_code_login_mail_task_email_service_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task when email service raises an exception. @@ -346,7 +348,7 @@ class TestSendEmailCodeLoginMailTask: ) def test_send_email_code_login_mail_task_invalid_parameters( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task with invalid parameters. @@ -388,7 +390,7 @@ class TestSendEmailCodeLoginMailTask: mock_email_service_instance.send_email.assert_called_once() def test_send_email_code_login_mail_task_edge_cases( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task with edge cases and boundary conditions. @@ -451,7 +453,7 @@ class TestSendEmailCodeLoginMailTask: ) def test_send_email_code_login_mail_task_database_integration( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task with database integration. 
@@ -497,7 +499,7 @@ class TestSendEmailCodeLoginMailTask: assert account.status == "active" def test_send_email_code_login_mail_task_redis_integration( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task with Redis integration. @@ -541,7 +543,7 @@ class TestSendEmailCodeLoginMailTask: redis_client.delete(cache_key) def test_send_email_code_login_mail_task_error_handling_comprehensive( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test comprehensive error handling for email code login mail task. diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py index 95a867dbb5..a452bee9f8 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py @@ -4,6 +4,7 @@ from unittest.mock import patch import pytest from sqlalchemy import delete +from sqlalchemy.orm import Session from configs import dify_config from core.app.app_config.entities import WorkflowUIBasedAppConfig @@ -30,7 +31,7 @@ from tasks.mail_human_input_delivery_task import dispatch_human_input_email_task @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(HumanInputFormRecipient)) db_session_with_containers.execute(delete(HumanInputDelivery)) db_session_with_containers.execute(delete(HumanInputForm)) @@ -42,7 +43,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_workspace_member(db_session_with_containers): +def 
_create_workspace_member(db_session_with_containers: Session): account = Account( email="owner@example.com", name="Owner", @@ -172,7 +173,9 @@ def _create_workflow_pause_state( db_session_with_containers.commit() -def test_dispatch_human_input_email_task_integration(monkeypatch: pytest.MonkeyPatch, db_session_with_containers): +def test_dispatch_human_input_email_task_integration( + monkeypatch: pytest.MonkeyPatch, db_session_with_containers: Session +): tenant, account = _create_workspace_member(db_session_with_containers) workflow_run_id = str(uuid.uuid4()) workflow_id = str(uuid.uuid4()) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_inner_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_inner_task.py index 1a20b6deec..f8e54ea9e6 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_inner_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_inner_task.py @@ -2,6 +2,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from tasks.mail_inner_task import send_inner_email_task @@ -51,7 +52,7 @@ class TestMailInnerTask: }, } - def test_send_inner_email_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_inner_email_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test successful email sending with valid data. @@ -90,7 +91,9 @@ class TestMailInnerTask: html_content="Test email content", ) - def test_send_inner_email_single_recipient(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_inner_email_single_recipient( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test email sending with single recipient. 
@@ -126,7 +129,9 @@ class TestMailInnerTask: html_content="Test email content", ) - def test_send_inner_email_empty_substitutions(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_inner_email_empty_substitutions( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test email sending with empty substitutions. @@ -163,7 +168,7 @@ class TestMailInnerTask: ) def test_send_inner_email_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email sending when mail service is not initialized. @@ -193,7 +198,7 @@ class TestMailInnerTask: mock_external_service_dependencies["email_service"].send_raw_email.assert_not_called() def test_send_inner_email_template_rendering_error( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email sending when template rendering fails. @@ -222,7 +227,9 @@ class TestMailInnerTask: # Verify no email service calls due to exception mock_external_service_dependencies["email_service"].send_raw_email.assert_not_called() - def test_send_inner_email_service_error(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_inner_email_service_error( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test email sending when email service fails. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py index d34828c4b1..c8c7a4d961 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py @@ -18,6 +18,7 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import delete, select +from sqlalchemy.orm import Session from extensions.ext_redis import redis_client from libs.email_i18n import EmailType @@ -42,7 +43,7 @@ class TestMailInviteMemberTask: """ @pytest.fixture(autouse=True) - def cleanup_database(self, db_session_with_containers): + def cleanup_database(self, db_session_with_containers: Session): """Clean up database before each test to ensure isolation.""" # Clear all test data db_session_with_containers.execute(delete(TenantAccountJoin)) @@ -78,7 +79,7 @@ class TestMailInviteMemberTask: "config": mock_config, } - def _create_test_account_and_tenant(self, db_session_with_containers): + def _create_test_account_and_tenant(self, db_session_with_containers: Session): """ Helper method to create a test account and tenant for testing. @@ -147,7 +148,7 @@ class TestMailInviteMemberTask: redis_client.setex(cache_key, 24 * 60 * 60, json.dumps(invitation_data)) # 24 hours return token - def _create_pending_account_for_invitation(self, db_session_with_containers, email, tenant): + def _create_pending_account_for_invitation(self, db_session_with_containers: Session, email, tenant): """ Helper method to create a pending account for invitation testing. 
@@ -185,7 +186,9 @@ class TestMailInviteMemberTask: return account - def test_send_invite_member_mail_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_invite_member_mail_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful invitation email sending with all parameters. @@ -231,7 +234,7 @@ class TestMailInviteMemberTask: assert template_context["url"] == f"https://console.dify.ai/activate?token={token}" def test_send_invite_member_mail_different_languages( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test invitation email sending with different language codes. @@ -263,7 +266,7 @@ class TestMailInviteMemberTask: assert call_args[1]["language_code"] == language def test_send_invite_member_mail_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test behavior when mail service is not initialized. @@ -292,7 +295,7 @@ class TestMailInviteMemberTask: mock_email_service.send_email.assert_not_called() def test_send_invite_member_mail_email_service_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test error handling when email service raises an exception. @@ -322,7 +325,7 @@ class TestMailInviteMemberTask: assert "Send invite member mail to %s failed" in error_call def test_send_invite_member_mail_template_context_validation( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test template context contains all required fields for email rendering. 
@@ -368,7 +371,7 @@ class TestMailInviteMemberTask: assert template_context["url"] == f"https://console.dify.ai/activate?token={token}" def test_send_invite_member_mail_integration_with_redis_token( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test integration with Redis token validation. @@ -407,7 +410,7 @@ class TestMailInviteMemberTask: assert invitation_data["workspace_id"] == tenant.id def test_send_invite_member_mail_with_special_characters( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email sending with special characters in names and workspace names. @@ -449,7 +452,7 @@ class TestMailInviteMemberTask: assert template_context["workspace_name"] == workspace_name def test_send_invite_member_mail_real_database_integration( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test real database integration with actual invitation flow. @@ -501,7 +504,7 @@ class TestMailInviteMemberTask: assert tenant_join.role == TenantAccountRole.NORMAL def test_send_invite_member_mail_token_lifecycle_management( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test token lifecycle management and validation. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_owner_transfer_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_owner_transfer_task.py index e08b099480..176645a4ab 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_owner_transfer_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_owner_transfer_task.py @@ -11,6 +11,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from libs.email_i18n import EmailType from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole @@ -44,7 +45,7 @@ class TestMailOwnerTransferTask: "get_email_service": mock_get_email_service, } - def _create_test_account_and_tenant(self, db_session_with_containers): + def _create_test_account_and_tenant(self, db_session_with_containers: Session): """ Helper method to create test account and tenant for testing. @@ -86,7 +87,9 @@ class TestMailOwnerTransferTask: return account, tenant - def test_send_owner_transfer_confirm_task_success(self, db_session_with_containers, mock_mail_dependencies): + def test_send_owner_transfer_confirm_task_success( + self, db_session_with_containers: Session, mock_mail_dependencies + ): """ Test successful owner transfer confirmation email sending. @@ -127,7 +130,7 @@ class TestMailOwnerTransferTask: assert call_args[1]["template_context"]["WorkspaceName"] == test_workspace def test_send_owner_transfer_confirm_task_mail_not_initialized( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test owner transfer confirmation email when mail service is not initialized. 
@@ -158,7 +161,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_not_called() def test_send_owner_transfer_confirm_task_exception_handling( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test exception handling in owner transfer confirmation email. @@ -192,7 +195,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_called_once() def test_send_old_owner_transfer_notify_email_task_success( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test successful old owner transfer notification email sending. @@ -234,7 +237,7 @@ class TestMailOwnerTransferTask: assert call_args[1]["template_context"]["NewOwnerEmail"] == test_new_owner_email def test_send_old_owner_transfer_notify_email_task_mail_not_initialized( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test old owner transfer notification email when mail service is not initialized. @@ -265,7 +268,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_not_called() def test_send_old_owner_transfer_notify_email_task_exception_handling( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test exception handling in old owner transfer notification email. @@ -299,7 +302,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_called_once() def test_send_new_owner_transfer_notify_email_task_success( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test successful new owner transfer notification email sending. 
@@ -338,7 +341,7 @@ class TestMailOwnerTransferTask: assert call_args[1]["template_context"]["WorkspaceName"] == test_workspace def test_send_new_owner_transfer_notify_email_task_mail_not_initialized( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test new owner transfer notification email when mail service is not initialized. @@ -367,7 +370,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_not_called() def test_send_new_owner_transfer_notify_email_task_exception_handling( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test exception handling in new owner transfer notification email. diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_register_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_register_task.py index cced6f7780..071971f324 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_register_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_register_task.py @@ -9,6 +9,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from libs.email_i18n import EmailType from tasks.mail_register_task import send_email_register_mail_task, send_email_register_mail_task_when_account_exist @@ -35,7 +36,7 @@ class TestMailRegisterTask: "get_email_service": mock_get_email_service, } - def test_send_email_register_mail_task_success(self, db_session_with_containers, mock_mail_dependencies): + def test_send_email_register_mail_task_success(self, db_session_with_containers: Session, mock_mail_dependencies): """Test successful email registration mail sending.""" fake = Faker() language = "en-US" @@ -56,7 +57,7 @@ class TestMailRegisterTask: ) def test_send_email_register_mail_task_mail_not_initialized( - self, 
db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """Test email registration task when mail service is not initialized.""" mock_mail_dependencies["mail"].is_inited.return_value = False @@ -66,7 +67,9 @@ class TestMailRegisterTask: mock_mail_dependencies["get_email_service"].assert_not_called() mock_mail_dependencies["email_service"].send_email.assert_not_called() - def test_send_email_register_mail_task_exception_handling(self, db_session_with_containers, mock_mail_dependencies): + def test_send_email_register_mail_task_exception_handling( + self, db_session_with_containers: Session, mock_mail_dependencies + ): """Test email registration task exception handling.""" mock_mail_dependencies["email_service"].send_email.side_effect = Exception("Email service error") @@ -79,7 +82,7 @@ class TestMailRegisterTask: mock_logger.exception.assert_called_once_with("Send email register mail to %s failed", to_email) def test_send_email_register_mail_task_when_account_exist_success( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """Test successful email registration mail sending when account exists.""" fake = Faker() @@ -105,7 +108,7 @@ class TestMailRegisterTask: ) def test_send_email_register_mail_task_when_account_exist_mail_not_initialized( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """Test account exist email task when mail service is not initialized.""" mock_mail_dependencies["mail"].is_inited.return_value = False @@ -118,7 +121,7 @@ class TestMailRegisterTask: mock_mail_dependencies["email_service"].send_email.assert_not_called() def test_send_email_register_mail_task_when_account_exist_exception_handling( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): 
"""Test account exist email task exception handling.""" mock_mail_dependencies["email_service"].send_email.side_effect = Exception("Email service error") diff --git a/api/tests/test_containers_integration_tests/tasks/test_rag_pipeline_run_tasks.py b/api/tests/test_containers_integration_tests/tasks/test_rag_pipeline_run_tasks.py index f01fcc1742..5eea985fdc 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_rag_pipeline_run_tasks.py +++ b/api/tests/test_containers_integration_tests/tasks/test_rag_pipeline_run_tasks.py @@ -4,12 +4,13 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker +from flask import Flask from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom, RagPipelineGenerateEntity from core.app.entities.rag_pipeline_invoke_entities import RagPipelineInvokeEntity from core.rag.pipeline.queue import TenantIsolatedTaskQueue -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Pipeline from models.workflow import Workflow from tasks.rag_pipeline.priority_rag_pipeline_run_task import ( @@ -69,14 +70,14 @@ class TestRagPipelineRunTasks: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.commit() tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() @@ -725,7 +726,7 @@ class TestRagPipelineRunTasks: assert queue1._task_key != queue2._task_key def test_run_single_rag_pipeline_task_success( - self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers + self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers: Flask ): 
""" Test successful run_single_rag_pipeline_task execution. @@ -760,7 +761,7 @@ class TestRagPipelineRunTasks: assert isinstance(call_kwargs["application_generate_entity"], RagPipelineGenerateEntity) def test_run_single_rag_pipeline_task_entity_validation_error( - self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers + self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers: Flask ): """ Test run_single_rag_pipeline_task with invalid entity data. @@ -805,7 +806,7 @@ class TestRagPipelineRunTasks: mock_pipeline_generator.assert_not_called() def test_run_single_rag_pipeline_task_database_entity_not_found( - self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers + self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers: Flask ): """ Test run_single_rag_pipeline_task with non-existent database entities. diff --git a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py index b43b622870..204f533978 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py @@ -3,6 +3,7 @@ from unittest.mock import ANY, call, patch import pytest from sqlalchemy import delete, func, select +from sqlalchemy.orm import Session from core.db.session_factory import session_factory from extensions.storage.storage_type import StorageType @@ -20,7 +21,7 @@ from tasks.remove_app_and_related_data_task import ( @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(WorkflowDraftVariable)) 
db_session_with_containers.execute(delete(WorkflowDraftVariableFile)) db_session_with_containers.execute(delete(UploadFile)) @@ -29,7 +30,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_tenant_and_app(db_session_with_containers): +def _create_tenant_and_app(db_session_with_containers: Session): tenant = Tenant(name=f"test_tenant_{uuid.uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -117,7 +118,7 @@ def _create_offload_data(db_session_with_containers, *, tenant_id: str, app_id: class TestDeleteDraftVariablesBatch: - def test_delete_draft_variables_batch_success(self, db_session_with_containers): + def test_delete_draft_variables_batch_success(self, db_session_with_containers: Session): """Test successful deletion of draft variables in batches.""" _, app1 = _create_tenant_and_app(db_session_with_containers) _, app2 = _create_tenant_and_app(db_session_with_containers) @@ -137,7 +138,7 @@ class TestDeleteDraftVariablesBatch: assert app1_remaining_count == 0 assert app2_remaining_count == 100 - def test_delete_draft_variables_batch_empty_result(self, db_session_with_containers): + def test_delete_draft_variables_batch_empty_result(self, db_session_with_containers: Session): """Test deletion when no draft variables exist for the app.""" result = delete_draft_variables_batch(str(uuid.uuid4()), 1000) @@ -176,7 +177,7 @@ class TestDeleteDraftVariableOffloadData: """Test the Offload data cleanup functionality.""" @patch("extensions.ext_storage.storage") - def test_delete_draft_variable_offload_data_success(self, mock_storage, db_session_with_containers): + def test_delete_draft_variable_offload_data_success(self, mock_storage, db_session_with_containers: Session): """Test successful deletion of offload data.""" tenant, app = _create_tenant_and_app(db_session_with_containers) offload_data = _create_offload_data(db_session_with_containers, tenant_id=tenant.id, app_id=app.id, 
count=3) diff --git a/api/tests/test_containers_integration_tests/test_opendal_fs_default_root.py b/api/tests/test_containers_integration_tests/test_opendal_fs_default_root.py index 34a1941c39..6365207661 100644 --- a/api/tests/test_containers_integration_tests/test_opendal_fs_default_root.py +++ b/api/tests/test_containers_integration_tests/test_opendal_fs_default_root.py @@ -1,12 +1,14 @@ from pathlib import Path +import pytest + from extensions.storage.opendal_storage import OpenDALStorage class TestOpenDALFsDefaultRoot: """Test that OpenDALStorage with scheme='fs' works correctly when no root is provided.""" - def test_fs_without_root_uses_default(self, tmp_path, monkeypatch): + def test_fs_without_root_uses_default(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch): """When no root is specified, the default 'storage' should be used and passed to the Operator.""" # Change to tmp_path so the default "storage" dir is created there monkeypatch.chdir(tmp_path) @@ -25,7 +27,7 @@ class TestOpenDALFsDefaultRoot: # Cleanup storage.delete("test_default_root.txt") - def test_fs_with_explicit_root(self, tmp_path): + def test_fs_with_explicit_root(self, tmp_path: Path): """When root is explicitly provided, it should be used.""" custom_root = str(tmp_path / "custom_storage") storage = OpenDALStorage(scheme="fs", root=custom_root) @@ -38,7 +40,7 @@ class TestOpenDALFsDefaultRoot: # Cleanup storage.delete("test_explicit_root.txt") - def test_fs_with_env_var_root(self, tmp_path, monkeypatch): + def test_fs_with_env_var_root(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch): """When OPENDAL_FS_ROOT env var is set, it should be picked up via _get_opendal_kwargs.""" env_root = str(tmp_path / "env_storage") monkeypatch.setenv("OPENDAL_FS_ROOT", env_root) diff --git a/api/tests/test_containers_integration_tests/test_workflow_pause_integration.py b/api/tests/test_containers_integration_tests/test_workflow_pause_integration.py index b00d827e37..6402e7da2b 100644 --- 
a/api/tests/test_containers_integration_tests/test_workflow_pause_integration.py +++ b/api/tests/test_containers_integration_tests/test_workflow_pause_integration.py @@ -175,7 +175,7 @@ class TestWorkflowPauseIntegration: """Comprehensive integration tests for workflow pause functionality.""" @pytest.fixture(autouse=True) - def setup_test_data(self, db_session_with_containers): + def setup_test_data(self, db_session_with_containers: Session): """Set up test data for each test method using TestContainers.""" # Create test tenant and account diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py index 19a41b6186..a5086b4c5d 100644 --- a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py @@ -1,12 +1,14 @@ from textwrap import dedent +from flask import Flask + from .test_utils import CodeExecutorTestMixin class TestJavaScriptCodeExecutor(CodeExecutorTestMixin): """Test class for JavaScript code executor functionality.""" - def test_javascript_plain(self, flask_app_with_containers): + def test_javascript_plain(self, flask_app_with_containers: Flask): """Test basic JavaScript code execution with console.log output""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -14,7 +16,7 @@ class TestJavaScriptCodeExecutor(CodeExecutorTestMixin): result_message = CodeExecutor.execute_code(language=CodeLanguage.JAVASCRIPT, preload="", code=code) assert result_message == "Hello World\n" - def test_javascript_json(self, flask_app_with_containers): + def test_javascript_json(self, flask_app_with_containers: Flask): """Test JavaScript code execution with JSON output""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -25,7 +27,7 @@ class 
TestJavaScriptCodeExecutor(CodeExecutorTestMixin): result = CodeExecutor.execute_code(language=CodeLanguage.JAVASCRIPT, preload="", code=code) assert result == '{"Hello":"World"}\n' - def test_javascript_with_code_template(self, flask_app_with_containers): + def test_javascript_with_code_template(self, flask_app_with_containers: Flask): """Test JavaScript workflow code template execution with inputs""" CodeExecutor, CodeLanguage = self.code_executor_imports JavascriptCodeProvider, _ = self.javascript_imports @@ -37,7 +39,7 @@ class TestJavaScriptCodeExecutor(CodeExecutorTestMixin): ) assert result == {"result": "HelloWorld"} - def test_javascript_get_runner_script(self, flask_app_with_containers): + def test_javascript_get_runner_script(self, flask_app_with_containers: Flask): """Test JavaScript template transformer runner script generation""" _, NodeJsTemplateTransformer = self.javascript_imports diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py index ddb079f00c..8b4c3c3d4a 100644 --- a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py @@ -1,12 +1,14 @@ import base64 +from flask import Flask + from .test_utils import CodeExecutorTestMixin class TestJinja2CodeExecutor(CodeExecutorTestMixin): """Test class for Jinja2 code executor functionality.""" - def test_jinja2(self, flask_app_with_containers): + def test_jinja2(self, flask_app_with_containers: Flask): """Test basic Jinja2 template execution with variable substitution""" CodeExecutor, CodeLanguage = self.code_executor_imports _, Jinja2TemplateTransformer = self.jinja2_imports @@ -25,7 +27,7 @@ class TestJinja2CodeExecutor(CodeExecutorTestMixin): ) assert result == "<>Hello World<>\n" - def 
test_jinja2_with_code_template(self, flask_app_with_containers): + def test_jinja2_with_code_template(self, flask_app_with_containers: Flask): """Test Jinja2 workflow code template execution with inputs""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -34,7 +36,7 @@ class TestJinja2CodeExecutor(CodeExecutorTestMixin): ) assert result == {"result": "Hello World"} - def test_jinja2_get_runner_script(self, flask_app_with_containers): + def test_jinja2_get_runner_script(self, flask_app_with_containers: Flask): """Test Jinja2 template transformer runner script generation""" _, Jinja2TemplateTransformer = self.jinja2_imports @@ -43,7 +45,7 @@ class TestJinja2CodeExecutor(CodeExecutorTestMixin): assert runner_script.count(Jinja2TemplateTransformer._inputs_placeholder) == 1 assert runner_script.count(Jinja2TemplateTransformer._result_tag) == 2 - def test_jinja2_template_with_special_characters(self, flask_app_with_containers): + def test_jinja2_template_with_special_characters(self, flask_app_with_containers: Flask): """ Test that templates with special characters (quotes, newlines) render correctly. 
This is a regression test for issue #26818 where textarea pre-fill values diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py index 6d93df2472..0de41e1312 100644 --- a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py @@ -1,12 +1,14 @@ from textwrap import dedent +from flask import Flask + from .test_utils import CodeExecutorTestMixin class TestPython3CodeExecutor(CodeExecutorTestMixin): """Test class for Python3 code executor functionality.""" - def test_python3_plain(self, flask_app_with_containers): + def test_python3_plain(self, flask_app_with_containers: Flask): """Test basic Python3 code execution with print output""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -14,7 +16,7 @@ class TestPython3CodeExecutor(CodeExecutorTestMixin): result = CodeExecutor.execute_code(language=CodeLanguage.PYTHON3, preload="", code=code) assert result == "Hello World\n" - def test_python3_json(self, flask_app_with_containers): + def test_python3_json(self, flask_app_with_containers: Flask): """Test Python3 code execution with JSON output""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -25,7 +27,7 @@ class TestPython3CodeExecutor(CodeExecutorTestMixin): result = CodeExecutor.execute_code(language=CodeLanguage.PYTHON3, preload="", code=code) assert result == '{"Hello": "World"}\n' - def test_python3_with_code_template(self, flask_app_with_containers): + def test_python3_with_code_template(self, flask_app_with_containers: Flask): """Test Python3 workflow code template execution with inputs""" CodeExecutor, CodeLanguage = self.code_executor_imports Python3CodeProvider, _ = self.python3_imports @@ -37,7 +39,7 @@ class TestPython3CodeExecutor(CodeExecutorTestMixin): ) 
assert result == {"result": "HelloWorld"} - def test_python3_get_runner_script(self, flask_app_with_containers): + def test_python3_get_runner_script(self, flask_app_with_containers: Flask): """Test Python3 template transformer runner script generation""" _, Python3TemplateTransformer = self.python3_imports diff --git a/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py new file mode 100644 index 0000000000..62d3d79cf1 --- /dev/null +++ b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py @@ -0,0 +1,103 @@ +"""Unit tests for the Markdown API docs generator.""" + +import importlib.util +import sys +from pathlib import Path + + +def _load_generate_swagger_markdown_docs_module(): + api_dir = Path(__file__).resolve().parents[3] + script_path = api_dir / "dev" / "generate_swagger_markdown_docs.py" + + spec = importlib.util.spec_from_file_location("generate_swagger_markdown_docs", script_path) + assert spec + assert spec.loader + + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) # type: ignore[attr-defined] + return module + + +def test_generate_markdown_docs_keeps_split_docs_and_merges_fastopenapi_into_console(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / "openapi" + markdown_dir = tmp_path / "markdown" + stale_combined_doc = markdown_dir / "api-reference.md" + markdown_dir.mkdir() + stale_combined_doc.write_text("stale", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + path.write_text("{}", encoding="utf-8") + paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + path = output_dir / 
module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n\n## Routes\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + written_paths = module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert [path.name for path in written_paths] == [ + "console-swagger.md", + "web-swagger.md", + "service-swagger.md", + ] + assert not stale_combined_doc.exists() + assert not list(swagger_dir.glob("*.json")) + + console_markdown = (markdown_dir / "console-swagger.md").read_text(encoding="utf-8") + assert "## FastOpenAPI Preview (OpenAPI 3.0)" in console_markdown + assert "### fastopenapi-console-openapi" in console_markdown + assert "#### Routes" in console_markdown + assert "FastOpenAPI Preview" not in (markdown_dir / "web-swagger.md").read_text(encoding="utf-8") + assert "FastOpenAPI Preview" not in (markdown_dir / "service-swagger.md").read_text(encoding="utf-8") + + +def test_generate_markdown_docs_only_removes_generated_specs_from_separate_swagger_dir(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / "swagger" + markdown_dir = tmp_path / "markdown" + swagger_dir.mkdir() + existing_file = swagger_dir / "existing.txt" + existing_file.write_text("keep me", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + path.write_text("{}", encoding="utf-8") + paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, 
exist_ok=True) + path = output_dir / module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert existing_file.read_text(encoding="utf-8") == "keep me" + assert not list(swagger_dir.glob("*.json")) diff --git a/api/tests/unit_tests/commands/test_generate_swagger_specs.py b/api/tests/unit_tests/commands/test_generate_swagger_specs.py index e77e875081..79a577087d 100644 --- a/api/tests/unit_tests/commands/test_generate_swagger_specs.py +++ b/api/tests/unit_tests/commands/test_generate_swagger_specs.py @@ -6,6 +6,16 @@ import sys from pathlib import Path +def _walk_values(value): + yield value + if isinstance(value, dict): + for child in value.values(): + yield from _walk_values(child) + elif isinstance(value, list): + for child in value: + yield from _walk_values(child) + + def _load_generate_swagger_specs_module(): api_dir = Path(__file__).resolve().parents[3] script_path = api_dir / "dev" / "generate_swagger_specs.py" @@ -35,3 +45,32 @@ def test_generate_specs_writes_console_web_and_service_swagger_files(tmp_path): payload = json.loads(path.read_text(encoding="utf-8")) assert payload["swagger"] == "2.0" assert "paths" in payload + + +def test_generate_specs_writes_swagger_with_resolvable_references_and_no_nulls(tmp_path): + module = _load_generate_swagger_specs_module() + + written_paths = module.generate_specs(tmp_path) + + for path in written_paths: + payload = json.loads(path.read_text(encoding="utf-8")) + definitions = payload["definitions"] + refs = { + 
item["$ref"].removeprefix("#/definitions/") + for item in _walk_values(payload) + if isinstance(item, dict) and isinstance(item.get("$ref"), str) + } + + assert refs <= set(definitions) + assert all(value is not None for value in _walk_values(payload)) + + +def test_generate_specs_is_idempotent(tmp_path): + module = _load_generate_swagger_specs_module() + + first_paths = module.generate_specs(tmp_path / "first") + second_paths = module.generate_specs(tmp_path / "second") + + assert [path.name for path in first_paths] == [path.name for path in second_paths] + for first_path, second_path in zip(first_paths, second_paths): + assert first_path.read_text(encoding="utf-8") == second_path.read_text(encoding="utf-8") diff --git a/api/tests/unit_tests/configs/test_dify_config.py b/api/tests/unit_tests/configs/test_dify_config.py index bad246a4bb..57dbf453de 100644 --- a/api/tests/unit_tests/configs/test_dify_config.py +++ b/api/tests/unit_tests/configs/test_dify_config.py @@ -114,8 +114,8 @@ def test_flask_configs(monkeypatch: pytest.MonkeyPatch): "pool_recycle": 3600, "pool_size": 30, "pool_use_lifo": False, - "pool_reset_on_return": None, "pool_timeout": 30, + "pool_reset_on_return": "rollback", } assert config["CONSOLE_WEB_URL"] == "https://example.com" diff --git a/api/tests/unit_tests/controllers/common/test_helpers.py b/api/tests/unit_tests/controllers/common/test_helpers.py index 59c463177c..376a7a90c5 100644 --- a/api/tests/unit_tests/controllers/common/test_helpers.py +++ b/api/tests/unit_tests/controllers/common/test_helpers.py @@ -57,7 +57,7 @@ class TestGuessFileInfoFromResponse: (False, "bin"), ], ) - def test_generated_filename_when_missing(self, monkeypatch, magic_available, expected_ext): + def test_generated_filename_when_missing(self, monkeypatch: pytest.MonkeyPatch, magic_available, expected_ext): if magic_available: if helpers.magic is None: pytest.skip("python-magic is not installed, cannot run 'magic_available=True' test variant") @@ -155,7 +155,7 @@ 
class TestMagicImportWarnings: ) def test_magic_import_warning_per_platform( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, platform_name, expected_message, ): diff --git a/api/tests/unit_tests/controllers/common/test_schema.py b/api/tests/unit_tests/controllers/common/test_schema.py index 56c8160f02..6cf36e3bce 100644 --- a/api/tests/unit_tests/controllers/common/test_schema.py +++ b/api/tests/unit_tests/controllers/common/test_schema.py @@ -17,6 +17,14 @@ class ProductModel(BaseModel): price: float +class ChildModel(BaseModel): + value: str + + +class ParentModel(BaseModel): + child: ChildModel + + @pytest.fixture(autouse=True) def mock_console_ns(): """Mock the console_ns to avoid circular imports during test collection.""" @@ -64,6 +72,22 @@ def test_register_schema_model_passes_schema_from_pydantic(): assert schema == expected_schema +def test_register_schema_model_promotes_nested_pydantic_definitions(): + from controllers.common.schema import DEFAULT_REF_TEMPLATE_SWAGGER_2_0, register_schema_model + + namespace = MagicMock(spec=Namespace) + + register_schema_model(namespace, ParentModel) + + called_schemas = {call.args[0]: call.args[1] for call in namespace.schema_model.call_args_list} + parent_schema = ParentModel.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + + assert set(called_schemas) == {"ParentModel", "ChildModel"} + assert "$defs" not in called_schemas["ParentModel"] + assert called_schemas["ParentModel"]["properties"]["child"]["$ref"] == "#/definitions/ChildModel" + assert called_schemas["ChildModel"] == parent_schema["$defs"]["ChildModel"] + + def test_register_schema_models_registers_multiple_models(): from controllers.common.schema import register_schema_models @@ -77,7 +101,7 @@ def test_register_schema_models_registers_multiple_models(): assert called_names == ["UserModel", "ProductModel"] -def test_register_schema_models_calls_register_schema_model(monkeypatch): +def 
test_register_schema_models_calls_register_schema_model(monkeypatch: pytest.MonkeyPatch): from controllers.common.schema import register_schema_models namespace = MagicMock(spec=Namespace) diff --git a/api/tests/unit_tests/controllers/console/app/test_app_response_models.py b/api/tests/unit_tests/controllers/console/app/test_app_response_models.py index 35d07a987d..80e7c41a9e 100644 --- a/api/tests/unit_tests/controllers/console/app/test_app_response_models.py +++ b/api/tests/unit_tests/controllers/console/app/test_app_response_models.py @@ -10,6 +10,8 @@ from typing import Any import pytest from flask.views import MethodView +from pydantic import ValidationError +from werkzeug.datastructures import MultiDict # kombu references MethodView as a global when importing celery/kombu pools. if not hasattr(builtins, "MethodView"): @@ -174,6 +176,101 @@ def _dummy_workflow(): ) +def test_app_list_query_normalizes_orpc_bracket_tag_ids(app_module): + first_tag_id = "8c4ef3d1-58a1-4d94-8a1c-1c171d889e08" + second_tag_id = "3c39395b-6d1f-4030-8b17-eaa7cc85221c" + query_args = MultiDict( + [ + ("page", "1"), + ("limit", "30"), + ("tag_ids[1]", second_tag_id), + ("tag_ids[0]", first_tag_id), + ] + ) + + normalized = app_module._normalize_app_list_query_args(query_args) + query = app_module.AppListQuery.model_validate(normalized) + + assert query.tag_ids == [first_tag_id, second_tag_id] + + +def test_app_list_query_preserves_regular_query_params(app_module): + query_args = MultiDict( + [ + ("page", "2"), + ("limit", "50"), + ("mode", "chat"), + ("name", "Sales Copilot"), + ("is_created_by_me", "true"), + ] + ) + + normalized = app_module._normalize_app_list_query_args(query_args) + query = app_module.AppListQuery.model_validate(normalized) + + assert normalized == { + "page": "2", + "limit": "50", + "mode": "chat", + "name": "Sales Copilot", + "is_created_by_me": "true", + } + assert query.page == 2 + assert query.limit == 50 + assert query.mode == "chat" + assert query.name == 
"Sales Copilot" + assert query.is_created_by_me is True + assert query.tag_ids is None + + +def test_app_list_query_normalizes_empty_bracket_tag_ids_to_none(app_module): + query_args = MultiDict( + [ + ("tag_ids[0]", ""), + ("tag_ids[1]", " "), + ] + ) + + normalized = app_module._normalize_app_list_query_args(query_args) + query = app_module.AppListQuery.model_validate(normalized) + + assert normalized == {"tag_ids": ["", " "]} + assert query.tag_ids is None + + +def test_app_list_query_rejects_invalid_bracket_tag_id(app_module): + normalized = app_module._normalize_app_list_query_args(MultiDict([("tag_ids[0]", "not-a-uuid")])) + + with pytest.raises(ValidationError): + app_module.AppListQuery.model_validate(normalized) + + +def test_app_list_query_sorts_bracket_tag_ids_by_index(app_module): + first_tag_id = "8c4ef3d1-58a1-4d94-8a1c-1c171d889e08" + second_tag_id = "3c39395b-6d1f-4030-8b17-eaa7cc85221c" + third_tag_id = "9d5ec0f7-4f2b-4e7f-9c13-1e7a034d0eb1" + query_args = MultiDict( + [ + ("tag_ids[2]", third_tag_id), + ("tag_ids[1]", second_tag_id), + ("tag_ids[0]", first_tag_id), + ] + ) + + normalized = app_module._normalize_app_list_query_args(query_args) + query = app_module.AppListQuery.model_validate(normalized) + + assert query.tag_ids == [first_tag_id, second_tag_id, third_tag_id] + + +def test_app_list_query_rejects_flat_tag_ids(app_module): + tag_id = "8c4ef3d1-58a1-4d94-8a1c-1c171d889e08" + normalized = app_module._normalize_app_list_query_args(MultiDict([("tag_ids", tag_id)])) + + with pytest.raises(ValidationError): + app_module.AppListQuery.model_validate(normalized) + + def test_app_partial_serialization_uses_aliases(app_models): AppPartial = app_models.AppPartial created_at = _ts() diff --git a/api/tests/unit_tests/controllers/console/app/test_workflow_pause_details_api.py b/api/tests/unit_tests/controllers/console/app/test_workflow_pause_details_api.py index c4a8148446..d06978c3fa 100644 --- 
a/api/tests/unit_tests/controllers/console/app/test_workflow_pause_details_api.py +++ b/api/tests/unit_tests/controllers/console/app/test_workflow_pause_details_api.py @@ -12,8 +12,7 @@ from controllers.console.app import workflow_run as workflow_run_module from controllers.web.error import NotFoundError from graphon.entities.pause_reason import HumanInputRequired from graphon.enums import WorkflowExecutionStatus -from graphon.nodes.human_input.entities import FormInput, UserAction -from graphon.nodes.human_input.enums import FormInputType +from graphon.nodes.human_input.entities import ParagraphInputConfig, UserActionConfig from libs import login as login_lib from models.account import Account, AccountStatus, TenantAccountRole from models.workflow import WorkflowRun @@ -63,8 +62,8 @@ def test_pause_details_returns_backstage_input_url(app: Flask, monkeypatch: pyte reason = HumanInputRequired( form_id="form-1", form_content="content", - inputs=[FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="name")], - actions=[UserAction(id="approve", title="Approve")], + inputs=[ParagraphInputConfig(output_variable_name="name")], + actions=[UserActionConfig(id="approve", title="Approve")], node_id="node-1", node_title="Ask Name", ) diff --git a/api/tests/unit_tests/controllers/console/auth/test_account_activation.py b/api/tests/unit_tests/controllers/console/auth/test_account_activation.py index d3e864a75a..0fb0ebc330 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_account_activation.py +++ b/api/tests/unit_tests/controllers/console/auth/test_account_activation.py @@ -67,7 +67,7 @@ class TestActivateCheckApi: assert response["data"]["email"] == "invitee@example.com" @patch("controllers.console.auth.activate.RegisterService.get_invitation_with_case_fallback") - def test_check_invalid_invitation_token(self, mock_get_invitation, app): + def test_check_invalid_invitation_token(self, mock_get_invitation, app: Flask): """ Test checking invalid invitation 
token. @@ -185,7 +185,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, mock_account, ): @@ -227,7 +227,7 @@ class TestActivateApi: mock_db.session.commit.assert_called_once() @patch("controllers.console.auth.activate.RegisterService.get_invitation_with_case_fallback") - def test_activation_with_invalid_token(self, mock_get_invitation, app): + def test_activation_with_invalid_token(self, mock_get_invitation, app: Flask): """ Test account activation with invalid token. @@ -263,7 +263,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, mock_account, ): @@ -312,7 +312,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, mock_account, language, @@ -358,7 +358,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, ): """ @@ -398,7 +398,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, ): """ @@ -438,7 +438,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, mock_account, ): diff --git a/api/tests/unit_tests/controllers/console/auth/test_email_verification.py b/api/tests/unit_tests/controllers/console/auth/test_email_verification.py index b7bc73da5f..102af9b250 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_email_verification.py +++ b/api/tests/unit_tests/controllers/console/auth/test_email_verification.py @@ -140,7 +140,7 @@ class TestEmailCodeLoginSendEmailApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.is_email_send_ip_limit") - def test_send_email_code_ip_rate_limited(self, mock_is_ip_limit, mock_db, app): + def test_send_email_code_ip_rate_limited(self, mock_is_ip_limit, mock_db, app: Flask): """ Test email code sending blocked 
by IP rate limit. @@ -160,7 +160,7 @@ class TestEmailCodeLoginSendEmailApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.is_email_send_ip_limit") @patch("controllers.console.auth.login.AccountService.get_user_through_email") - def test_send_email_code_frozen_account(self, mock_get_user, mock_is_ip_limit, mock_db, app): + def test_send_email_code_frozen_account(self, mock_get_user, mock_is_ip_limit, mock_db, app: Flask): """ Test email code sending to frozen account. @@ -195,7 +195,7 @@ class TestEmailCodeLoginSendEmailApi: mock_get_user, mock_is_ip_limit, mock_db, - app, + app: Flask, mock_account, language_input, expected_language, @@ -267,7 +267,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -315,7 +315,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -353,7 +353,7 @@ class TestEmailCodeLoginApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.get_email_code_login_data") - def test_email_code_login_invalid_token(self, mock_get_data, mock_db, app): + def test_email_code_login_invalid_token(self, mock_get_data, mock_db, app: Flask): """ Test email code login with invalid token. @@ -375,7 +375,7 @@ class TestEmailCodeLoginApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.get_email_code_login_data") - def test_email_code_login_email_mismatch(self, mock_get_data, mock_db, app): + def test_email_code_login_email_mismatch(self, mock_get_data, mock_db, app: Flask): """ Test email code login with mismatched email. 
@@ -397,7 +397,7 @@ class TestEmailCodeLoginApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.get_email_code_login_data") - def test_email_code_login_wrong_code(self, mock_get_data, mock_db, app): + def test_email_code_login_wrong_code(self, mock_get_data, mock_db, app: Flask): """ Test email code login with incorrect code. @@ -431,7 +431,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, ): """ @@ -474,7 +474,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, ): """ @@ -515,7 +515,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, ): """ diff --git a/api/tests/unit_tests/controllers/console/auth/test_login_logout.py b/api/tests/unit_tests/controllers/console/auth/test_login_logout.py index d089be8905..ace2ce5706 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_login_logout.py +++ b/api/tests/unit_tests/controllers/console/auth/test_login_logout.py @@ -9,7 +9,7 @@ This module tests the core authentication endpoints including: """ import base64 -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock, Mock, patch import pytest from flask import Flask @@ -52,12 +52,12 @@ class TestLoginApi: return app @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return Api(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client.""" api.add_resource(LoginApi, "/login") return app.test_client() @@ -97,7 +97,7 @@ class TestLoginApi: mock_get_invitation, mock_is_rate_limit, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -141,14 +141,14 @@ class TestLoginApi: @patch("controllers.console.auth.login.AccountService.reset_login_error_rate_limit") def 
test_successful_login_with_valid_invitation( self, - mock_reset_rate_limit, + mock_reset_rate_limit: Mock, mock_login, mock_get_tenants, mock_authenticate, mock_get_invitation, mock_is_rate_limit, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -188,7 +188,7 @@ class TestLoginApi: @patch("controllers.console.auth.login.dify_config.BILLING_ENABLED", False) @patch("controllers.console.auth.login.AccountService.is_login_error_rate_limit") @patch("controllers.console.auth.login.RegisterService.get_invitation_with_case_fallback") - def test_login_fails_when_rate_limited(self, mock_get_invitation, mock_is_rate_limit, mock_db, app): + def test_login_fails_when_rate_limited(self, mock_get_invitation, mock_is_rate_limit, mock_db, app: Flask): """ Test login rejection when rate limit is exceeded. @@ -216,7 +216,7 @@ class TestLoginApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.dify_config.BILLING_ENABLED", True) @patch("controllers.console.auth.login.BillingService.is_email_in_freeze") - def test_login_fails_when_account_frozen(self, mock_is_frozen, mock_db, app): + def test_login_fails_when_account_frozen(self, mock_is_frozen, mock_db, app: Flask): """ Test login rejection for frozen accounts. @@ -253,7 +253,7 @@ class TestLoginApi: mock_get_invitation, mock_is_rate_limit, mock_db, - app, + app: Flask, ): """ Test login failure with invalid credentials. @@ -290,7 +290,7 @@ class TestLoginApi: @patch("controllers.console.auth.login.RegisterService.get_invitation_with_case_fallback") @patch("controllers.console.auth.login.AccountService.authenticate") def test_login_fails_for_banned_account( - self, mock_authenticate, mock_get_invitation, mock_is_rate_limit, mock_db, app + self, mock_authenticate, mock_get_invitation, mock_is_rate_limit, mock_db, app: Flask ): """ Test login rejection for banned accounts. 
@@ -328,14 +328,14 @@ class TestLoginApi: @patch("controllers.console.auth.login.FeatureService.get_system_features") def test_login_fails_when_no_workspace_and_limit_exceeded( self, - mock_get_features, - mock_get_tenants, - mock_authenticate, - mock_get_invitation, - mock_is_rate_limit, - mock_db, - app, - mock_account, + mock_get_features: MagicMock, + mock_get_tenants: MagicMock, + mock_authenticate: MagicMock, + mock_get_invitation: MagicMock, + mock_is_rate_limit: MagicMock, + mock_db: MagicMock, + app: Flask, + mock_account: MagicMock, ): """ Test login failure when user has no workspace and workspace limit exceeded. @@ -367,7 +367,7 @@ class TestLoginApi: @patch("controllers.console.auth.login.dify_config.BILLING_ENABLED", False) @patch("controllers.console.auth.login.AccountService.is_login_error_rate_limit") @patch("controllers.console.auth.login.RegisterService.get_invitation_with_case_fallback") - def test_login_invitation_email_mismatch(self, mock_get_invitation, mock_is_rate_limit, mock_db, app): + def test_login_invitation_email_mismatch(self, mock_get_invitation, mock_is_rate_limit, mock_db, app: Flask): """ Test login failure when invitation email doesn't match login email. 
@@ -412,7 +412,7 @@ class TestLoginApi: mock_get_invitation, mock_is_rate_limit, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -448,7 +448,7 @@ class TestLoginApi: mock_revoke_token, mock_get_token_data, mock_db, - app, + app: Flask, ): mock_get_token_data.return_value = {"email": "User@Example.com", "code": "123456"} mock_get_account.side_effect = Unauthorized("Account is banned.") @@ -491,7 +491,7 @@ class TestLogoutApi: @patch("controllers.console.auth.login.AccountService.logout") @patch("controllers.console.auth.login.flask_login.logout_user") def test_successful_logout( - self, mock_logout_user, mock_service_logout, mock_current_account, mock_db, app, mock_account + self, mock_logout_user, mock_service_logout, mock_current_account, mock_db, app: Flask, mock_account ): """ Test successful logout flow. @@ -518,7 +518,7 @@ class TestLogoutApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.current_account_with_tenant") @patch("controllers.console.auth.login.flask_login") - def test_logout_anonymous_user(self, mock_flask_login, mock_current_account, mock_db, app): + def test_logout_anonymous_user(self, mock_flask_login, mock_current_account, mock_db, app: Flask): """ Test logout for anonymous (not logged in) user. 
diff --git a/api/tests/unit_tests/controllers/console/auth/test_token_refresh.py b/api/tests/unit_tests/controllers/console/auth/test_token_refresh.py index d010f60866..22974ca416 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_token_refresh.py +++ b/api/tests/unit_tests/controllers/console/auth/test_token_refresh.py @@ -28,12 +28,12 @@ class TestRefreshTokenApi: return app @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return Api(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client.""" api.add_resource(RefreshTokenApi, "/refresh-token") return app.test_client() @@ -74,7 +74,7 @@ class TestRefreshTokenApi: assert response.json["result"] == "success" @patch("controllers.console.auth.login.extract_refresh_token", autospec=True) - def test_refresh_fails_without_token(self, mock_extract_token, app): + def test_refresh_fails_without_token(self, mock_extract_token, app: Flask): """ Test token refresh failure when no refresh token provided. @@ -98,7 +98,7 @@ class TestRefreshTokenApi: @patch("controllers.console.auth.login.extract_refresh_token", autospec=True) @patch("controllers.console.auth.login.AccountService.refresh_token", autospec=True) - def test_refresh_fails_with_invalid_token(self, mock_refresh_token, mock_extract_token, app): + def test_refresh_fails_with_invalid_token(self, mock_refresh_token, mock_extract_token, app: Flask): """ Test token refresh failure with invalid refresh token. 
@@ -123,7 +123,7 @@ class TestRefreshTokenApi: @patch("controllers.console.auth.login.extract_refresh_token", autospec=True) @patch("controllers.console.auth.login.AccountService.refresh_token", autospec=True) - def test_refresh_fails_with_expired_token(self, mock_refresh_token, mock_extract_token, app): + def test_refresh_fails_with_expired_token(self, mock_refresh_token, mock_extract_token, app: Flask): """ Test token refresh failure with expired refresh token. @@ -148,7 +148,7 @@ class TestRefreshTokenApi: @patch("controllers.console.auth.login.extract_refresh_token", autospec=True) @patch("controllers.console.auth.login.AccountService.refresh_token", autospec=True) - def test_refresh_with_empty_token(self, mock_refresh_token, mock_extract_token, app): + def test_refresh_with_empty_token(self, mock_refresh_token, mock_extract_token, app: Flask): """ Test token refresh with empty string token. diff --git a/api/tests/unit_tests/controllers/console/billing/test_billing.py b/api/tests/unit_tests/controllers/console/billing/test_billing.py index 810f1b94fc..defa9064fd 100644 --- a/api/tests/unit_tests/controllers/console/billing/test_billing.py +++ b/api/tests/unit_tests/controllers/console/billing/test_billing.py @@ -49,7 +49,7 @@ class TestPartnerTenants: mock_csrf.return_value = None yield {"db": mock_db, "csrf": mock_csrf} - def test_put_success(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_success(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test successful partner tenants bindings sync.""" # Arrange partner_key_encoded = base64.b64encode(b"partner-key-123").decode("utf-8") @@ -79,7 +79,7 @@ class TestPartnerTenants: mock_account.id, "partner-key-123", click_id ) - def test_put_invalid_partner_key_base64(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_invalid_partner_key_base64(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that 
invalid base64 partner_key raises BadRequest.""" # Arrange invalid_partner_key = "invalid-base64-!@#$" @@ -104,7 +104,7 @@ class TestPartnerTenants: resource.put(invalid_partner_key) assert "Invalid partner_key" in str(exc_info.value) - def test_put_missing_click_id(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_missing_click_id(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that missing click_id raises BadRequest.""" # Arrange partner_key_encoded = base64.b64encode(b"partner-key-123").decode("utf-8") @@ -128,7 +128,9 @@ class TestPartnerTenants: with pytest.raises(BadRequest): resource.put(partner_key_encoded) - def test_put_billing_service_json_decode_error(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_billing_service_json_decode_error( + self, app: Flask, mock_account, mock_billing_service, mock_decorators + ): """Test handling of billing service JSON decode error. When billing service returns non-200 status code with invalid JSON response, @@ -174,7 +176,7 @@ class TestPartnerTenants: assert isinstance(exc_info.value, json.JSONDecodeError) assert "Expecting value" in str(exc_info.value) - def test_put_empty_click_id(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_empty_click_id(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that empty click_id raises BadRequest.""" # Arrange partner_key_encoded = base64.b64encode(b"partner-key-123").decode("utf-8") @@ -199,7 +201,7 @@ class TestPartnerTenants: resource.put(partner_key_encoded) assert "Invalid partner information" in str(exc_info.value) - def test_put_empty_partner_key_after_decode(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_empty_partner_key_after_decode(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that empty partner_key after decode raises BadRequest.""" # Arrange # Base64 
encode an empty string @@ -225,7 +227,7 @@ class TestPartnerTenants: resource.put(empty_partner_key_encoded) assert "Invalid partner information" in str(exc_info.value) - def test_put_empty_user_id(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_empty_user_id(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that empty user id raises BadRequest.""" # Arrange partner_key_encoded = base64.b64encode(b"partner-key-123").decode("utf-8") diff --git a/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_auth.py b/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_auth.py index 5136922e88..9c5b5ec256 100644 --- a/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_auth.py +++ b/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_auth.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden, NotFound from controllers.console import console_ns @@ -29,7 +30,7 @@ def unwrap(func): class TestDatasourcePluginOAuthAuthorizationUrl: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasourcePluginOAuthAuthorizationUrl() method = unwrap(api.get) @@ -61,7 +62,7 @@ class TestDatasourcePluginOAuthAuthorizationUrl: assert response.status_code == 200 - def test_get_no_oauth_config(self, app): + def test_get_no_oauth_config(self, app: Flask): api = DatasourcePluginOAuthAuthorizationUrl() method = unwrap(api.get) @@ -80,7 +81,7 @@ class TestDatasourcePluginOAuthAuthorizationUrl: with pytest.raises(ValueError): method(api, "notion") - def test_get_without_credential_id_sets_cookie(self, app): + def test_get_without_credential_id_sets_cookie(self, app: Flask): api = DatasourcePluginOAuthAuthorizationUrl() method = unwrap(api.get) @@ -115,7 +116,7 @@ class TestDatasourcePluginOAuthAuthorizationUrl: class 
TestDatasourceOAuthCallback: - def test_callback_success_new_credential(self, app): + def test_callback_success_new_credential(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -157,7 +158,7 @@ class TestDatasourceOAuthCallback: assert response.status_code == 302 - def test_callback_missing_context(self, app): + def test_callback_missing_context(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -165,7 +166,7 @@ class TestDatasourceOAuthCallback: with pytest.raises(Forbidden): method(api, "notion") - def test_callback_invalid_context(self, app): + def test_callback_invalid_context(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -180,7 +181,7 @@ class TestDatasourceOAuthCallback: with pytest.raises(Forbidden): method(api, "notion") - def test_callback_oauth_config_not_found(self, app): + def test_callback_oauth_config_not_found(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -202,7 +203,7 @@ class TestDatasourceOAuthCallback: with pytest.raises(NotFound): method(api, "notion") - def test_callback_reauthorize_existing_credential(self, app): + def test_callback_reauthorize_existing_credential(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -245,7 +246,7 @@ class TestDatasourceOAuthCallback: assert response.status_code == 302 assert "/oauth-callback" in response.location - def test_callback_context_id_from_cookie(self, app): + def test_callback_context_id_from_cookie(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -289,7 +290,7 @@ class TestDatasourceOAuthCallback: class TestDatasourceAuth: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasourceAuth() method = unwrap(api.post) @@ -312,7 +313,7 @@ class TestDatasourceAuth: assert status == 200 - def test_post_invalid_credentials(self, app): + def test_post_invalid_credentials(self, app: Flask): api = 
DatasourceAuth() method = unwrap(api.post) @@ -334,7 +335,7 @@ class TestDatasourceAuth: with pytest.raises(ValueError): method(api, "notion") - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasourceAuth() method = unwrap(api.get) @@ -355,7 +356,7 @@ class TestDatasourceAuth: assert status == 200 assert response["result"] - def test_post_missing_credentials(self, app): + def test_post_missing_credentials(self, app: Flask): api = DatasourceAuth() method = unwrap(api.post) @@ -372,7 +373,7 @@ class TestDatasourceAuth: with pytest.raises(ValueError): method(api, "notion") - def test_get_empty_list(self, app): + def test_get_empty_list(self, app: Flask): api = DatasourceAuth() method = unwrap(api.get) @@ -395,7 +396,7 @@ class TestDatasourceAuth: class TestDatasourceAuthDeleteApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DatasourceAuthDeleteApi() method = unwrap(api.post) @@ -418,7 +419,7 @@ class TestDatasourceAuthDeleteApi: assert status == 200 - def test_delete_missing_credential_id(self, app): + def test_delete_missing_credential_id(self, app: Flask): api = DatasourceAuthDeleteApi() method = unwrap(api.post) @@ -437,7 +438,7 @@ class TestDatasourceAuthDeleteApi: class TestDatasourceAuthUpdateApi: - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = DatasourceAuthUpdateApi() method = unwrap(api.post) @@ -460,7 +461,7 @@ class TestDatasourceAuthUpdateApi: assert status == 201 - def test_update_with_credentials_none(self, app): + def test_update_with_credentials_none(self, app: Flask): api = DatasourceAuthUpdateApi() method = unwrap(api.post) @@ -484,7 +485,7 @@ class TestDatasourceAuthUpdateApi: update_mock.assert_called_once() assert status == 201 - def test_update_name_only(self, app): + def test_update_name_only(self, app: Flask): api = DatasourceAuthUpdateApi() method = unwrap(api.post) @@ -507,7 +508,7 @@ class TestDatasourceAuthUpdateApi: 
assert status == 201 - def test_update_with_empty_credentials_dict(self, app): + def test_update_with_empty_credentials_dict(self, app: Flask): api = DatasourceAuthUpdateApi() method = unwrap(api.post) @@ -533,7 +534,7 @@ class TestDatasourceAuthUpdateApi: class TestDatasourceAuthListApi: - def test_list_success(self, app): + def test_list_success(self, app: Flask): api = DatasourceAuthListApi() method = unwrap(api.get) @@ -553,7 +554,7 @@ class TestDatasourceAuthListApi: assert status == 200 - def test_auth_list_empty(self, app): + def test_auth_list_empty(self, app: Flask): api = DatasourceAuthListApi() method = unwrap(api.get) @@ -574,7 +575,7 @@ class TestDatasourceAuthListApi: assert status == 200 assert response["result"] == [] - def test_hardcode_list_empty(self, app): + def test_hardcode_list_empty(self, app: Flask): api = DatasourceHardCodeAuthListApi() method = unwrap(api.get) @@ -597,7 +598,7 @@ class TestDatasourceAuthListApi: class TestDatasourceHardCodeAuthListApi: - def test_list_success(self, app): + def test_list_success(self, app: Flask): api = DatasourceHardCodeAuthListApi() method = unwrap(api.get) @@ -619,7 +620,7 @@ class TestDatasourceHardCodeAuthListApi: class TestDatasourceAuthOauthCustomClient: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasourceAuthOauthCustomClient() method = unwrap(api.post) @@ -642,7 +643,7 @@ class TestDatasourceAuthOauthCustomClient: assert status == 200 - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DatasourceAuthOauthCustomClient() method = unwrap(api.delete) @@ -662,7 +663,7 @@ class TestDatasourceAuthOauthCustomClient: assert status == 200 - def test_post_empty_payload(self, app): + def test_post_empty_payload(self, app: Flask): api = DatasourceAuthOauthCustomClient() method = unwrap(api.post) @@ -685,7 +686,7 @@ class TestDatasourceAuthOauthCustomClient: assert status == 200 - def test_post_disabled_flag(self, app): + def 
test_post_disabled_flag(self, app: Flask): api = DatasourceAuthOauthCustomClient() method = unwrap(api.post) @@ -714,7 +715,7 @@ class TestDatasourceAuthOauthCustomClient: class TestDatasourceAuthDefaultApi: - def test_set_default_success(self, app): + def test_set_default_success(self, app: Flask): api = DatasourceAuthDefaultApi() method = unwrap(api.post) @@ -737,7 +738,7 @@ class TestDatasourceAuthDefaultApi: assert status == 200 - def test_default_missing_id(self, app): + def test_default_missing_id(self, app: Flask): api = DatasourceAuthDefaultApi() method = unwrap(api.post) @@ -756,7 +757,7 @@ class TestDatasourceAuthDefaultApi: class TestDatasourceUpdateProviderNameApi: - def test_update_name_success(self, app): + def test_update_name_success(self, app: Flask): api = DatasourceUpdateProviderNameApi() method = unwrap(api.post) @@ -779,7 +780,7 @@ class TestDatasourceUpdateProviderNameApi: assert status == 200 - def test_update_name_too_long(self, app): + def test_update_name_too_long(self, app: Flask): api = DatasourceUpdateProviderNameApi() method = unwrap(api.post) @@ -799,7 +800,7 @@ class TestDatasourceUpdateProviderNameApi: with pytest.raises(ValueError): method(api, "notion") - def test_update_name_missing_credential_id(self, app): + def test_update_name_missing_credential_id(self, app: Flask): api = DatasourceUpdateProviderNameApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_content_preview.py b/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_content_preview.py index 7a8ccde55a..d4c6a775ec 100644 --- a/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_content_preview.py +++ b/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_content_preview.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden from 
controllers.console import console_ns @@ -25,7 +26,7 @@ class TestDataSourceContentPreviewApi: "credential_id": "cred-1", } - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DataSourceContentPreviewApi() method = unwrap(api.post) @@ -66,7 +67,7 @@ class TestDataSourceContentPreviewApi: assert status == 200 assert response == preview_result - def test_post_forbidden_non_account_user(self, app): + def test_post_forbidden_non_account_user(self, app: Flask): api = DataSourceContentPreviewApi() method = unwrap(api.post) @@ -85,7 +86,7 @@ class TestDataSourceContentPreviewApi: with pytest.raises(Forbidden): method(api, pipeline, "node-1") - def test_post_invalid_payload(self, app): + def test_post_invalid_payload(self, app: Flask): api = DataSourceContentPreviewApi() method = unwrap(api.post) @@ -108,7 +109,7 @@ class TestDataSourceContentPreviewApi: with pytest.raises(ValueError): method(api, pipeline, "node-1") - def test_post_without_credential_id(self, app): + def test_post_without_credential_id(self, app: Flask): api = DataSourceContentPreviewApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_datasets.py b/api/tests/unit_tests/controllers/console/datasets/test_datasets.py index 30b7ab654c..c20e9e7df5 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_datasets.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_datasets.py @@ -2,6 +2,7 @@ import datetime from unittest.mock import MagicMock, PropertyMock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, Forbidden, NotFound import services @@ -61,7 +62,7 @@ class TestDatasetList: user.is_dataset_editor = True return user - def test_get_success_basic(self, app): + def test_get_success_basic(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -96,7 +97,7 @@ class TestDatasetList: assert resp["total"] == 1 assert resp["data"][0]["embedding_available"] is 
True - def test_get_with_ids_filter(self, app): + def test_get_with_ids_filter(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -131,7 +132,7 @@ class TestDatasetList: assert status == 200 assert resp["total"] == 2 - def test_get_with_tag_ids(self, app): + def test_get_with_tag_ids(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -164,7 +165,7 @@ class TestDatasetList: assert status == 200 - def test_embedding_available_false(self, app): + def test_embedding_available_false(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -206,7 +207,7 @@ class TestDatasetList: assert resp["data"][0]["embedding_available"] is False - def test_partial_members_permission(self, app): + def test_partial_members_permission(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -245,7 +246,7 @@ class TestDatasetList: class TestDatasetListApiPost: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -293,7 +294,7 @@ class TestDatasetListApiPost: assert status == 201 - def test_post_forbidden(self, app): + def test_post_forbidden(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -313,7 +314,7 @@ class TestDatasetListApiPost: with pytest.raises(Forbidden): method(api) - def test_post_duplicate_name(self, app): + def test_post_duplicate_name(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -338,7 +339,7 @@ class TestDatasetListApiPost: with pytest.raises(DatasetNameDuplicateError): method(api) - def test_post_invalid_payload_missing_name(self, app): + def test_post_invalid_payload_missing_name(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -346,7 +347,7 @@ class TestDatasetListApiPost: with pytest.raises(ValueError): method(api) - def test_post_invalid_indexing_technique(self, app): + def test_post_invalid_indexing_technique(self, app: Flask): api = DatasetListApi() method 
= unwrap(api.post) @@ -359,7 +360,7 @@ class TestDatasetListApiPost: with pytest.raises(ValueError, match="Invalid indexing technique"): method(api) - def test_post_invalid_provider(self, app): + def test_post_invalid_provider(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -374,7 +375,7 @@ class TestDatasetListApiPost: class TestDatasetApiGet: - def test_get_success_basic(self, app): + def test_get_success_basic(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -430,7 +431,7 @@ class TestDatasetApiGet: assert status == 200 assert data["embedding_available"] is True - def test_get_dataset_not_found(self, app): + def test_get_dataset_not_found(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -451,7 +452,7 @@ class TestDatasetApiGet: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_get_permission_denied(self, app): + def test_get_permission_denied(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -478,7 +479,7 @@ class TestDatasetApiGet: with pytest.raises(Forbidden, match="no access"): method(api, dataset_id) - def test_get_high_quality_embedding_unavailable(self, app): + def test_get_high_quality_embedding_unavailable(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -533,7 +534,7 @@ class TestDatasetApiGet: assert data["embedding_available"] is False - def test_get_partial_members_permission(self, app): + def test_get_partial_members_permission(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -593,7 +594,7 @@ class TestDatasetApiGet: class TestDatasetApiPatch: - def test_patch_success_basic(self, app): + def test_patch_success_basic(self, app: Flask): api = DatasetApi() method = unwrap(api.patch) @@ -662,7 +663,7 @@ class TestDatasetApiPatch: assert status == 200 assert result["partial_member_list"] == [] - def test_patch_dataset_not_found(self, app): + def test_patch_dataset_not_found(self, app: Flask): api = 
DatasetApi() method = unwrap(api.patch) @@ -677,7 +678,7 @@ class TestDatasetApiPatch: with pytest.raises(NotFound, match="Dataset not found"): method(api, "missing") - def test_patch_permission_denied(self, app): + def test_patch_permission_denied(self, app: Flask): api = DatasetApi() method = unwrap(api.patch) @@ -707,7 +708,7 @@ class TestDatasetApiPatch: with pytest.raises(Forbidden): method(api, dataset_id) - def test_patch_partial_members_update(self, app): + def test_patch_partial_members_update(self, app: Flask): api = DatasetApi() method = unwrap(api.patch) @@ -776,7 +777,7 @@ class TestDatasetApiPatch: assert result["partial_member_list"] == payload["partial_member_list"] - def test_patch_clear_partial_members(self, app): + def test_patch_clear_partial_members(self, app: Flask): api = DatasetApi() method = unwrap(api.patch) @@ -846,7 +847,7 @@ class TestDatasetApiPatch: class TestDatasetApiDelete: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DatasetApi() method = unwrap(api.delete) @@ -877,7 +878,7 @@ class TestDatasetApiDelete: assert status == 204 assert result == {"result": "success"} - def test_delete_forbidden_no_permission(self, app): + def test_delete_forbidden_no_permission(self, app: Flask): api = DatasetApi() method = unwrap(api.delete) @@ -896,7 +897,7 @@ class TestDatasetApiDelete: with pytest.raises(Forbidden): method(api, dataset_id) - def test_delete_dataset_not_found(self, app): + def test_delete_dataset_not_found(self, app: Flask): api = DatasetApi() method = unwrap(api.delete) @@ -920,7 +921,7 @@ class TestDatasetApiDelete: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_delete_dataset_in_use(self, app): + def test_delete_dataset_in_use(self, app: Flask): api = DatasetApi() method = unwrap(api.delete) @@ -946,7 +947,7 @@ class TestDatasetApiDelete: class TestDatasetUseCheckApi: - def test_get_use_check_true(self, app): + def 
test_get_use_check_true(self, app: Flask): api = DatasetUseCheckApi() method = unwrap(api.get) @@ -965,7 +966,7 @@ class TestDatasetUseCheckApi: assert status == 200 assert result == {"is_using": True} - def test_get_use_check_false(self, app): + def test_get_use_check_false(self, app: Flask): api = DatasetUseCheckApi() method = unwrap(api.get) @@ -986,7 +987,7 @@ class TestDatasetUseCheckApi: class TestDatasetQueryApi: - def test_get_queries_success(self, app): + def test_get_queries_success(self, app: Flask): api = DatasetQueryApi() method = unwrap(api.get) @@ -1030,7 +1031,7 @@ class TestDatasetQueryApi: assert response["has_more"] is False assert len(response["data"]) == 2 - def test_get_queries_dataset_not_found(self, app): + def test_get_queries_dataset_not_found(self, app: Flask): api = DatasetQueryApi() method = unwrap(api.get) @@ -1052,7 +1053,7 @@ class TestDatasetQueryApi: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_get_queries_permission_denied(self, app): + def test_get_queries_permission_denied(self, app: Flask): api = DatasetQueryApi() method = unwrap(api.get) @@ -1081,7 +1082,7 @@ class TestDatasetQueryApi: with pytest.raises(Forbidden): method(api, dataset_id) - def test_get_queries_pagination_has_more(self, app): + def test_get_queries_pagination_has_more(self, app: Flask): api = DatasetQueryApi() method = unwrap(api.get) @@ -1155,7 +1156,7 @@ class TestDatasetIndexingEstimateApi: "dataset_id": None, } - def test_post_success_upload_file(self, app): + def test_post_success_upload_file(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) @@ -1196,7 +1197,7 @@ class TestDatasetIndexingEstimateApi: assert status == 200 assert response == {"tokens": 100} - def test_post_file_not_found(self, app): + def test_post_file_not_found(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) @@ -1226,7 +1227,7 @@ class TestDatasetIndexingEstimateApi: with 
pytest.raises(NotFound): method(api) - def test_post_llm_bad_request_error(self, app): + def test_post_llm_bad_request_error(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) mock_file = self._upload_file() @@ -1261,7 +1262,7 @@ class TestDatasetIndexingEstimateApi: with pytest.raises(ProviderNotInitializeError): method(api) - def test_post_provider_token_not_init(self, app): + def test_post_provider_token_not_init(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) mock_file = self._upload_file() @@ -1296,7 +1297,7 @@ class TestDatasetIndexingEstimateApi: with pytest.raises(ProviderNotInitializeError): method(api) - def test_post_generic_exception(self, app): + def test_post_generic_exception(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) mock_file = self._upload_file() @@ -1333,7 +1334,7 @@ class TestDatasetIndexingEstimateApi: class TestDatasetRelatedAppListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetRelatedAppListApi() method = unwrap(api.get) @@ -1371,7 +1372,7 @@ class TestDatasetRelatedAppListApi: assert response["total"] == 2 assert response["data"] == [app1, app2] - def test_get_dataset_not_found(self, app): + def test_get_dataset_not_found(self, app: Flask): api = DatasetRelatedAppListApi() method = unwrap(api.get) @@ -1389,7 +1390,7 @@ class TestDatasetRelatedAppListApi: with pytest.raises(NotFound): method(api, "dataset-1") - def test_get_permission_denied(self, app): + def test_get_permission_denied(self, app: Flask): api = DatasetRelatedAppListApi() method = unwrap(api.get) @@ -1413,7 +1414,7 @@ class TestDatasetRelatedAppListApi: with pytest.raises(Forbidden): method(api, "dataset-1") - def test_get_filters_none_apps(self, app): + def test_get_filters_none_apps(self, app: Flask): api = DatasetRelatedAppListApi() method = unwrap(api.get) @@ -1452,7 +1453,7 @@ class TestDatasetRelatedAppListApi: class 
TestDatasetIndexingStatusApi: - def test_get_success_with_documents(self, app): + def test_get_success_with_documents(self, app: Flask): api = DatasetIndexingStatusApi() method = unwrap(api.get) @@ -1493,7 +1494,7 @@ class TestDatasetIndexingStatusApi: assert item["completed_segments"] == 3 assert item["total_segments"] == 3 - def test_get_success_no_documents(self, app): + def test_get_success_no_documents(self, app: Flask): api = DatasetIndexingStatusApi() method = unwrap(api.get) @@ -1513,7 +1514,7 @@ class TestDatasetIndexingStatusApi: assert status == 200 assert response == {"data": []} - def test_segment_counts_different_values(self, app): + def test_segment_counts_different_values(self, app: Flask): api = DatasetIndexingStatusApi() method = unwrap(api.get) @@ -1553,7 +1554,7 @@ class TestDatasetIndexingStatusApi: class TestDatasetApiKeyApi: - def test_get_api_keys_success(self, app): + def test_get_api_keys_success(self, app: Flask): api = DatasetApiKeyApi() method = unwrap(api.get) @@ -1590,7 +1591,7 @@ class TestDatasetApiKeyApi: assert response["data"][1]["id"] == "key-2" assert response["data"][1]["token"] == "ds-def" - def test_post_create_api_key_success(self, app): + def test_post_create_api_key_success(self, app: Flask): api = DatasetApiKeyApi() method = unwrap(api.post) @@ -1635,7 +1636,7 @@ class TestDatasetApiKeyApi: assert response["type"] == "dataset" assert response["created_at"] is not None - def test_post_exceed_max_keys(self, app): + def test_post_exceed_max_keys(self, app: Flask): api = DatasetApiKeyApi() method = unwrap(api.post) @@ -1661,7 +1662,7 @@ class TestDatasetApiKeyApi: class TestDatasetApiDeleteApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DatasetApiDeleteApi() method = unwrap(api.delete) @@ -1691,7 +1692,7 @@ class TestDatasetApiDeleteApi: assert status == 204 assert response["result"] == "success" - def test_delete_key_not_found(self, app): + def test_delete_key_not_found(self, 
app: Flask): api = DatasetApiDeleteApi() method = unwrap(api.delete) @@ -1711,7 +1712,7 @@ class TestDatasetApiDeleteApi: class TestDatasetEnableApiApi: - def test_enable_api(self, app): + def test_enable_api(self, app: Flask): api = DatasetEnableApiApi() method = unwrap(api.post) @@ -1727,7 +1728,7 @@ class TestDatasetEnableApiApi: assert status == 200 assert response["result"] == "success" - def test_disable_api(self, app): + def test_disable_api(self, app: Flask): api = DatasetEnableApiApi() method = unwrap(api.post) @@ -1745,7 +1746,7 @@ class TestDatasetEnableApiApi: class TestDatasetApiBaseUrlApi: - def test_get_api_base_url_from_config(self, app): + def test_get_api_base_url_from_config(self, app: Flask): api = DatasetApiBaseUrlApi() method = unwrap(api.get) @@ -1760,7 +1761,7 @@ class TestDatasetApiBaseUrlApi: assert response["api_base_url"] == "https://example.com/v1" - def test_get_api_base_url_from_request(self, app): + def test_get_api_base_url_from_request(self, app: Flask): api = DatasetApiBaseUrlApi() method = unwrap(api.get) @@ -1775,7 +1776,7 @@ class TestDatasetApiBaseUrlApi: assert response["api_base_url"] == "http://localhost:5000/v1" - def test_get_api_base_url_no_double_v1(self, app): + def test_get_api_base_url_no_double_v1(self, app: Flask): api = DatasetApiBaseUrlApi() method = unwrap(api.get) @@ -1792,7 +1793,7 @@ class TestDatasetApiBaseUrlApi: class TestDatasetRetrievalSettingApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetRetrievalSettingApi() method = unwrap(api.get) @@ -1813,7 +1814,7 @@ class TestDatasetRetrievalSettingApi: class TestDatasetRetrievalSettingMockApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetRetrievalSettingMockApi() method = unwrap(api.get) @@ -1830,7 +1831,7 @@ class TestDatasetRetrievalSettingMockApi: class TestDatasetErrorDocs: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = 
DatasetErrorDocs() method = unwrap(api.get) @@ -1853,7 +1854,7 @@ class TestDatasetErrorDocs: assert status == 200 assert response["total"] == 1 - def test_get_dataset_not_found(self, app): + def test_get_dataset_not_found(self, app: Flask): api = DatasetErrorDocs() method = unwrap(api.get) @@ -1869,7 +1870,7 @@ class TestDatasetErrorDocs: class TestDatasetPermissionUserListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetPermissionUserListApi() method = unwrap(api.get) @@ -1900,7 +1901,7 @@ class TestDatasetPermissionUserListApi: assert status == 200 assert response["data"] == users - def test_get_permission_denied(self, app): + def test_get_permission_denied(self, app: Flask): api = DatasetPermissionUserListApi() method = unwrap(api.get) @@ -1926,7 +1927,7 @@ class TestDatasetPermissionUserListApi: class TestDatasetAutoDisableLogApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetAutoDisableLogApi() method = unwrap(api.get) @@ -1949,7 +1950,7 @@ class TestDatasetAutoDisableLogApi: assert status == 200 assert response == logs - def test_get_dataset_not_found(self, app): + def test_get_dataset_not_found(self, app: Flask): api = DatasetAutoDisableLogApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_datasets_document.py b/api/tests/unit_tests/controllers/console/datasets/test_datasets_document.py index d9b02ac453..ff9e1736d2 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_datasets_document.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_datasets_document.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden, NotFound import services @@ -239,7 +240,7 @@ class TestDatasetDocumentListApi: assert "documents" in response - def test_post_forbidden(self, app): + def 
test_post_forbidden(self, app: Flask): api = DatasetDocumentListApi() method = unwrap(api.post) @@ -395,7 +396,7 @@ class TestDocumentDownloadApi: class TestDocumentProcessingApi: - def test_processing_forbidden_when_not_editor(self, app): + def test_processing_forbidden_when_not_editor(self, app: Flask): api = DocumentProcessingApi() method = unwrap(api.patch) @@ -1185,7 +1186,7 @@ class TestDocumentPermissionCases: "preview": [], } - def test_document_tenant_mismatch(self, app): + def test_document_tenant_mismatch(self, app: Flask): api = DocumentApi() method = unwrap(api.get) @@ -1253,7 +1254,7 @@ class TestDocumentPermissionCases: assert status == 200 assert response["mode"] == "custom" - def test_process_rule_permission_denied(self, app): + def test_process_rule_permission_denied(self, app: Flask): api = GetProcessRuleApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py index 693b06e95b..66d257ee66 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden, NotFound import services @@ -67,7 +68,7 @@ def _segment(): ) -def test_get_segment_with_summary(monkeypatch): +def test_get_segment_with_summary(monkeypatch: pytest.MonkeyPatch): segment = _segment() summary = SimpleNamespace(summary_content="summary") @@ -82,7 +83,7 @@ def test_get_segment_with_summary(monkeypatch): class TestDatasetDocumentSegmentListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -132,7 +133,7 @@ class TestDatasetDocumentSegmentListApi: assert status == 200 - def 
test_get_dataset_not_found(self, app): + def test_get_dataset_not_found(self, app: Flask): api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -150,7 +151,7 @@ class TestDatasetDocumentSegmentListApi: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_get_permission_denied(self, app): + def test_get_permission_denied(self, app: Flask): api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -176,7 +177,7 @@ class TestDatasetDocumentSegmentListApi: class TestDatasetDocumentSegmentApi: - def test_patch_success(self, app): + def test_patch_success(self, app: Flask): api = DatasetDocumentSegmentApi() method = unwrap(api.patch) @@ -221,7 +222,7 @@ class TestDatasetDocumentSegmentApi: assert status == 200 assert response["result"] == "success" - def test_patch_document_indexing_in_progress(self, app): + def test_patch_document_indexing_in_progress(self, app: Flask): api = DatasetDocumentSegmentApi() method = unwrap(api.patch) @@ -264,7 +265,7 @@ class TestDatasetDocumentSegmentApi: with pytest.raises(InvalidActionError): method(api, "ds-1", "doc-1", "disable") - def test_patch_llm_bad_request(self, app): + def test_patch_llm_bad_request(self, app: Flask): api = DatasetDocumentSegmentApi() method = unwrap(api.patch) @@ -308,7 +309,7 @@ class TestDatasetDocumentSegmentApi: with pytest.raises(ProviderNotInitializeError): method(api, "ds-1", "doc-1", "enable") - def test_patch_provider_token_not_init(self, app): + def test_patch_provider_token_not_init(self, app: Flask): api = DatasetDocumentSegmentApi() method = unwrap(api.patch) @@ -354,7 +355,7 @@ class TestDatasetDocumentSegmentApi: class TestDatasetDocumentSegmentAddApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasetDocumentSegmentAddApi() method = unwrap(api.post) @@ -413,7 +414,7 @@ class TestDatasetDocumentSegmentAddApi: assert status == 200 assert response["data"]["id"] == "seg-1" - def test_post_llm_bad_request(self, app): + 
def test_post_llm_bad_request(self, app: Flask): api = DatasetDocumentSegmentAddApi() method = unwrap(api.post) @@ -452,7 +453,7 @@ class TestDatasetDocumentSegmentAddApi: with pytest.raises(ProviderNotInitializeError): method(api, "ds-1", "doc-1") - def test_post_provider_token_not_init(self, app): + def test_post_provider_token_not_init(self, app: Flask): api = DatasetDocumentSegmentAddApi() method = unwrap(api.post) @@ -493,7 +494,7 @@ class TestDatasetDocumentSegmentAddApi: class TestDatasetDocumentSegmentUpdateApi: - def test_patch_success(self, app): + def test_patch_success(self, app: Flask): api = DatasetDocumentSegmentUpdateApi() method = unwrap(api.patch) @@ -551,7 +552,7 @@ class TestDatasetDocumentSegmentUpdateApi: assert status == 200 assert "data" in response - def test_patch_llm_bad_request(self, app): + def test_patch_llm_bad_request(self, app: Flask): api = DatasetDocumentSegmentUpdateApi() method = unwrap(api.patch) @@ -596,7 +597,7 @@ class TestDatasetDocumentSegmentUpdateApi: class TestDatasetDocumentSegmentBatchImportApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -638,7 +639,7 @@ class TestDatasetDocumentSegmentBatchImportApi: assert status == 200 assert response["job_status"] == "waiting" - def test_post_dataset_not_found(self, app): + def test_post_dataset_not_found(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -659,7 +660,7 @@ class TestDatasetDocumentSegmentBatchImportApi: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_post_document_not_found(self, app): + def test_post_document_not_found(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -684,7 +685,7 @@ class TestDatasetDocumentSegmentBatchImportApi: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_post_upload_file_not_found(self, app): + def 
test_post_upload_file_not_found(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -713,7 +714,7 @@ class TestDatasetDocumentSegmentBatchImportApi: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_post_invalid_file_type(self, app): + def test_post_invalid_file_type(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -745,7 +746,7 @@ class TestDatasetDocumentSegmentBatchImportApi: with pytest.raises(ValueError): method(api, "ds-1", "doc-1") - def test_post_async_task_failure(self, app): + def test_post_async_task_failure(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -783,7 +784,7 @@ class TestDatasetDocumentSegmentBatchImportApi: assert status == 500 assert "error" in response - def test_get_job_not_found_in_redis(self, app): + def test_get_job_not_found_in_redis(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.get) @@ -799,7 +800,7 @@ class TestDatasetDocumentSegmentBatchImportApi: class TestChildChunkAddApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = ChildChunkAddApi() method = unwrap(api.post) @@ -852,7 +853,7 @@ class TestChildChunkAddApi: assert status == 200 assert response["data"]["id"] == "cc-1" - def test_post_child_chunk_indexing_error(self, app): + def test_post_child_chunk_indexing_error(self, app: Flask): api = ChildChunkAddApi() method = unwrap(api.post) @@ -897,7 +898,7 @@ class TestChildChunkAddApi: class TestChildChunkUpdateApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = ChildChunkUpdateApi() method = unwrap(api.delete) @@ -941,7 +942,7 @@ class TestChildChunkUpdateApi: assert status == 204 assert response["result"] == "success" - def test_delete_child_chunk_index_error(self, app): + def test_delete_child_chunk_index_error(self, app: Flask): api = ChildChunkUpdateApi() method = 
unwrap(api.delete) @@ -984,7 +985,7 @@ class TestChildChunkUpdateApi: class TestSegmentListAdvancedCases: - def test_segment_list_with_keyword_filter(self, app): + def test_segment_list_with_keyword_filter(self, app: Flask): api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -1035,7 +1036,7 @@ class TestSegmentListAdvancedCases: assert status == 200 assert response["total"] == 1 - def test_segment_list_permission_denied(self, app): + def test_segment_list_permission_denied(self, app: Flask): """Test segment list with permission denied""" api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -1058,7 +1059,7 @@ class TestSegmentListAdvancedCases: with pytest.raises(Forbidden): method(api, "ds-1", "doc-1") - def test_segment_list_dataset_not_found(self, app): + def test_segment_list_dataset_not_found(self, app: Flask): """Test segment list with dataset not found""" api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -1079,7 +1080,7 @@ class TestSegmentListAdvancedCases: class TestSegmentOperationCases: - def test_segment_add_with_provider_token_error(self, app): + def test_segment_add_with_provider_token_error(self, app: Flask): """Test segment add with provider token not initialized""" api = DatasetDocumentSegmentAddApi() method = unwrap(api.post) @@ -1117,7 +1118,7 @@ class TestSegmentOperationCases: with pytest.raises(ProviderTokenNotInitError): method(api, "ds-1", "doc-1") - def test_batch_import_with_document_not_found(self, app): + def test_batch_import_with_document_not_found(self, app: Flask): """Test batch import with document not found""" api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -1146,7 +1147,7 @@ class TestSegmentOperationCases: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_batch_import_with_invalid_file(self, app): + def test_batch_import_with_invalid_file(self, app: Flask): """Test batch import with invalid file type""" api = 
DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -1181,7 +1182,7 @@ class TestSegmentOperationCases: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_batch_import_with_async_task_failure(self, app): + def test_batch_import_with_async_task_failure(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -1226,7 +1227,7 @@ class TestSegmentOperationCases: assert status == 500 assert "error" in response - def test_batch_import_get_job_not_found(self, app): + def test_batch_import_get_job_not_found(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_external.py b/api/tests/unit_tests/controllers/console/datasets/test_external.py index 514bbbe040..7254bf7670 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_external.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_external.py @@ -57,7 +57,7 @@ def mock_auth(monkeypatch, current_user): class TestExternalApiTemplateListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = ExternalApiTemplateListApi() method = unwrap(api.get) @@ -78,7 +78,7 @@ class TestExternalApiTemplateListApi: assert resp["total"] == 1 assert resp["data"][0]["id"] == "1" - def test_post_forbidden(self, app, current_user): + def test_post_forbidden(self, app: Flask, current_user): current_user.is_dataset_editor = False api = ExternalApiTemplateListApi() method = unwrap(api.post) @@ -93,7 +93,7 @@ class TestExternalApiTemplateListApi: with pytest.raises(Forbidden): method(api) - def test_post_duplicate_name(self, app): + def test_post_duplicate_name(self, app: Flask): api = ExternalApiTemplateListApi() method = unwrap(api.post) @@ -114,7 +114,7 @@ class TestExternalApiTemplateListApi: class TestExternalApiTemplateApi: - def test_get_not_found(self, app): + def test_get_not_found(self, app: Flask): api = 
ExternalApiTemplateApi() method = unwrap(api.get) @@ -129,7 +129,7 @@ class TestExternalApiTemplateApi: with pytest.raises(NotFound): method(api, "api-id") - def test_delete_forbidden(self, app, current_user): + def test_delete_forbidden(self, app: Flask, current_user): current_user.has_edit_permission = False current_user.is_dataset_operator = False @@ -142,7 +142,7 @@ class TestExternalApiTemplateApi: class TestExternalApiUseCheckApi: - def test_get_scopes_usage_check_to_current_tenant(self, app): + def test_get_scopes_usage_check_to_current_tenant(self, app: Flask): api = ExternalApiUseCheckApi() method = unwrap(api.get) @@ -162,7 +162,7 @@ class TestExternalApiUseCheckApi: class TestExternalDatasetCreateApi: - def test_create_success(self, app): + def test_create_success(self, app: Flask): api = ExternalDatasetCreateApi() method = unwrap(api.post) @@ -206,7 +206,7 @@ class TestExternalDatasetCreateApi: assert status == 201 - def test_create_forbidden(self, app, current_user): + def test_create_forbidden(self, app: Flask, current_user): current_user.is_dataset_editor = False api = ExternalDatasetCreateApi() method = unwrap(api.post) @@ -226,7 +226,7 @@ class TestExternalDatasetCreateApi: class TestExternalKnowledgeHitTestingApi: - def test_hit_testing_dataset_not_found(self, app): + def test_hit_testing_dataset_not_found(self, app: Flask): api = ExternalKnowledgeHitTestingApi() method = unwrap(api.post) @@ -241,7 +241,7 @@ class TestExternalKnowledgeHitTestingApi: with pytest.raises(NotFound): method(api, "dataset-id") - def test_hit_testing_success(self, app): + def test_hit_testing_success(self, app: Flask): api = ExternalKnowledgeHitTestingApi() method = unwrap(api.post) @@ -266,7 +266,7 @@ class TestExternalKnowledgeHitTestingApi: class TestBedrockRetrievalApi: - def test_bedrock_retrieval(self, app): + def test_bedrock_retrieval(self, app: Flask): api = BedrockRetrievalApi() method = unwrap(api.post) diff --git 
a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py index 09ed2aaf69..4fa5d21493 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -35,7 +36,7 @@ def dataset(): @pytest.fixture(autouse=True) -def bypass_decorators(mocker): +def bypass_decorators(mocker: MockerFixture): """Bypass all decorators on the API method.""" mocker.patch( "controllers.console.datasets.hit_testing.setup_required", @@ -56,7 +57,7 @@ def bypass_decorators(mocker): class TestHitTestingApi: - def test_hit_testing_success(self, app, dataset, dataset_id): + def test_hit_testing_success(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -99,7 +100,7 @@ class TestHitTestingApi: assert "records" in result assert result["records"] == [] - def test_hit_testing_success_with_optional_record_fields(self, app, dataset, dataset_id): + def test_hit_testing_success_with_optional_record_fields(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestHitTestingApi: assert result["query"] == payload["query"] assert result["records"] == records - def test_hit_testing_dataset_not_found(self, app, dataset_id): + def test_hit_testing_dataset_not_found(self, app: Flask, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -175,7 +176,7 @@ class TestHitTestingApi: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_hit_testing_invalid_args(self, app, dataset, dataset_id): + def test_hit_testing_invalid_args(self, app: Flask, dataset, 
dataset_id): api = HitTestingApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py index de834c2d4d..4042190ff6 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -60,7 +61,7 @@ def metadata_id(): @pytest.fixture(autouse=True) -def bypass_decorators(mocker): +def bypass_decorators(mocker: MockerFixture): """Bypass setup/login/license decorators.""" mocker.patch( "controllers.console.datasets.metadata.setup_required", @@ -269,7 +270,7 @@ class TestDatasetMetadataApi: class TestDatasetMetadataBuiltInFieldApi: - def test_get_built_in_fields(self, app): + def test_get_built_in_fields(self, app: Flask): api = DatasetMetadataBuiltInFieldApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_website.py b/api/tests/unit_tests/controllers/console/datasets/test_website.py index 9f0da6e76f..9991a0d345 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_website.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_website.py @@ -2,6 +2,7 @@ from unittest.mock import Mock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from controllers.console import console_ns from controllers.console.datasets.error import WebsiteCrawlError @@ -31,7 +32,7 @@ def app(): @pytest.fixture(autouse=True) -def bypass_auth_and_setup(mocker): +def bypass_auth_and_setup(mocker: MockerFixture): """Bypass setup/login/account decorators.""" mocker.patch( "controllers.console.datasets.website.login_required", @@ -48,7 +49,7 @@ def 
bypass_auth_and_setup(mocker): class TestWebsiteCrawlApi: - def test_crawl_success(self, app, mocker): + def test_crawl_success(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -85,7 +86,7 @@ class TestWebsiteCrawlApi: assert status == 200 assert result["job_id"] == "job-1" - def test_crawl_invalid_payload(self, app, mocker): + def test_crawl_invalid_payload(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -113,7 +114,7 @@ class TestWebsiteCrawlApi: with pytest.raises(WebsiteCrawlError, match="invalid payload"): method(api) - def test_crawl_service_error(self, app, mocker): + def test_crawl_service_error(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestWebsiteCrawlApi: class TestWebsiteCrawlStatusApi: - def test_get_status_success(self, app, mocker): + def test_get_status_success(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -181,7 +182,7 @@ class TestWebsiteCrawlStatusApi: assert status == 200 assert result["status"] == "completed" - def test_get_status_invalid_provider(self, app, mocker): + def test_get_status_invalid_provider(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -203,7 +204,7 @@ class TestWebsiteCrawlStatusApi: with pytest.raises(WebsiteCrawlError, match="invalid provider"): method(api, job_id) - def test_get_status_service_error(self, app, mocker): + def test_get_status_service_error(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py index e358435de4..2cfa938af8 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py @@ -1,6 +1,7 @@ from unittest.mock 
import Mock import pytest +from pytest_mock import MockerFixture from controllers.console.datasets.error import PipelineNotFoundError from controllers.console.datasets.wraps import get_rag_pipeline @@ -16,7 +17,7 @@ class TestGetRagPipeline: with pytest.raises(ValueError, match="missing pipeline_id"): dummy_view() - def test_pipeline_not_found(self, mocker): + def test_pipeline_not_found(self, mocker: MockerFixture): @get_rag_pipeline def dummy_view(**kwargs): return "ok" @@ -34,7 +35,7 @@ class TestGetRagPipeline: with pytest.raises(PipelineNotFoundError): dummy_view(pipeline_id="pipeline-1") - def test_pipeline_found_and_injected(self, mocker): + def test_pipeline_found_and_injected(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) pipeline.id = "pipeline-1" pipeline.tenant_id = "tenant-1" @@ -57,7 +58,7 @@ class TestGetRagPipeline: assert result is pipeline - def test_pipeline_id_removed_from_kwargs(self, mocker): + def test_pipeline_id_removed_from_kwargs(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline @@ -79,7 +80,7 @@ class TestGetRagPipeline: assert result == "ok" - def test_pipeline_id_cast_to_string(self, mocker): + def test_pipeline_id_cast_to_string(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline diff --git a/api/tests/unit_tests/controllers/console/explore/test_banner.py b/api/tests/unit_tests/controllers/console/explore/test_banner.py index c8f674f515..d1cb6b6a03 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_banner.py +++ b/api/tests/unit_tests/controllers/console/explore/test_banner.py @@ -1,6 +1,8 @@ from datetime import datetime from unittest.mock import MagicMock, patch +from flask import Flask + import controllers.console.explore.banner as banner_module from models.enums import BannerStatus @@ -12,7 +14,7 @@ def unwrap(func): class TestBannerApi: - def test_get_banners_with_requested_language(self, app): + def 
test_get_banners_with_requested_language(self, app: Flask): api = banner_module.BannerApi() method = unwrap(api.get) @@ -41,7 +43,7 @@ class TestBannerApi: } ] - def test_get_banners_fallback_to_en_us(self, app): + def test_get_banners_fallback_to_en_us(self, app: Flask): api = banner_module.BannerApi() method = unwrap(api.get) @@ -76,7 +78,7 @@ class TestBannerApi: } ] - def test_get_banners_default_language_en_us(self, app): + def test_get_banners_default_language_en_us(self, app: Flask): api = banner_module.BannerApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/explore/test_message.py b/api/tests/unit_tests/controllers/console/explore/test_message.py index 145cc9cdd7..3d41489435 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_message.py +++ b/api/tests/unit_tests/controllers/console/explore/test_message.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import InternalServerError, NotFound import controllers.console.explore.message as module @@ -54,7 +55,7 @@ def make_message(): class TestMessageListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = module.MessageListApi() method = unwrap(api.get) @@ -96,7 +97,7 @@ class TestMessageListApi: with pytest.raises(NotChatAppError): method(installed_app) - def test_conversation_not_exists(self, app): + def test_conversation_not_exists(self, app: Flask): api = module.MessageListApi() method = unwrap(api.get) @@ -118,7 +119,7 @@ class TestMessageListApi: with pytest.raises(NotFound): method(installed_app) - def test_first_message_not_exists(self, app): + def test_first_message_not_exists(self, app: Flask): api = module.MessageListApi() method = unwrap(api.get) @@ -142,7 +143,7 @@ class TestMessageListApi: class TestMessageFeedbackApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = module.MessageFeedbackApi() method = 
unwrap(api.post) @@ -161,7 +162,7 @@ class TestMessageFeedbackApi: assert result["result"] == "success" - def test_message_not_exists(self, app): + def test_message_not_exists(self, app: Flask): api = module.MessageFeedbackApi() method = unwrap(api.post) @@ -182,7 +183,7 @@ class TestMessageFeedbackApi: class TestMessageMoreLikeThisApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -221,7 +222,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(NotCompletionAppError): method(installed_app, "mid") - def test_more_like_this_disabled(self, app): + def test_more_like_this_disabled(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -243,7 +244,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(AppMoreLikeThisDisabledError): method(installed_app, "mid") - def test_message_not_exists_more_like_this(self, app): + def test_message_not_exists_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -265,7 +266,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(NotFound): method(installed_app, "mid") - def test_provider_not_init_more_like_this(self, app): + def test_provider_not_init_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -287,7 +288,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(ProviderNotInitializeError): method(installed_app, "mid") - def test_quota_exceeded_more_like_this(self, app): + def test_quota_exceeded_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -309,7 +310,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(ProviderQuotaExceededError): method(installed_app, "mid") - def test_model_not_support_more_like_this(self, app): + def test_model_not_support_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = 
unwrap(api.get) @@ -331,7 +332,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(ProviderModelCurrentlyNotSupportError): method(installed_app, "mid") - def test_invoke_error_more_like_this(self, app): + def test_invoke_error_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -353,7 +354,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(CompletionRequestError): method(installed_app, "mid") - def test_unexpected_error_more_like_this(self, app): + def test_unexpected_error_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py b/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py index 76c863577a..89cbea5ddc 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py +++ b/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py @@ -1,5 +1,7 @@ from unittest.mock import MagicMock, patch +from flask import Flask + import controllers.console.explore.recommended_app as module from models.model import AppMode, IconType @@ -11,7 +13,7 @@ def unwrap(func): class TestRecommendedAppListApi: - def test_get_with_language_param(self, app): + def test_get_with_language_param(self, app: Flask): api = module.RecommendedAppListApi() method = unwrap(api.get) @@ -31,7 +33,7 @@ class TestRecommendedAppListApi: service_mock.assert_called_once_with("en-US") assert result == result_data - def test_get_fallback_to_user_language(self, app): + def test_get_fallback_to_user_language(self, app: Flask): api = module.RecommendedAppListApi() method = unwrap(api.get) @@ -51,7 +53,7 @@ class TestRecommendedAppListApi: service_mock.assert_called_once_with("fr-FR") assert result == result_data - def test_get_fallback_to_default_language(self, app): + def test_get_fallback_to_default_language(self, app: Flask): api = module.RecommendedAppListApi() 
method = unwrap(api.get) @@ -73,7 +75,7 @@ class TestRecommendedAppListApi: class TestRecommendedAppApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = module.RecommendedAppApi() method = unwrap(api.get) @@ -124,7 +126,7 @@ class TestRecommendedAppResponseModels: }, "app_id": "app-1", "description": "desc", - "category": "cat", + "categories": ["cat", "other"], "position": 1, "is_listed": True, "can_trial": False, @@ -135,4 +137,5 @@ class TestRecommendedAppResponseModels: ).model_dump(mode="json") assert response["recommended_apps"][0]["app_id"] == "app-1" + assert response["recommended_apps"][0]["categories"] == ["cat", "other"] assert response["categories"] == ["cat"] diff --git a/api/tests/unit_tests/controllers/console/explore/test_saved_message.py b/api/tests/unit_tests/controllers/console/explore/test_saved_message.py index bb7cdd55c4..71241890e9 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_saved_message.py +++ b/api/tests/unit_tests/controllers/console/explore/test_saved_message.py @@ -2,6 +2,7 @@ from unittest.mock import MagicMock, PropertyMock, patch from uuid import uuid4 import pytest +from flask import Flask from werkzeug.exceptions import NotFound import controllers.console.explore.saved_message as module @@ -42,7 +43,7 @@ def payload_patch(): class TestSavedMessageListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = module.SavedMessageListApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/explore/test_trial.py b/api/tests/unit_tests/controllers/console/explore/test_trial.py index 3625056af9..14f00e6295 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_trial.py +++ b/api/tests/unit_tests/controllers/console/explore/test_trial.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from werkzeug.exceptions import Forbidden, 
InternalServerError, NotFound import controllers.console.explore.trial as module @@ -88,7 +89,7 @@ def valid_parameters(): class TestTrialAppWorkflowRunApi: - def test_not_workflow_app(self, app): + def test_not_workflow_app(self, app: Flask): api = module.TrialAppWorkflowRunApi() method = unwrap(api.post) @@ -224,7 +225,7 @@ class TestTrialAppWorkflowRunApi: class TestTrialChatApi: - def test_not_chat_app(self, app): + def test_not_chat_app(self, app: Flask): api = module.TrialChatApi() method = unwrap(api.post) @@ -408,7 +409,7 @@ class TestTrialChatApi: class TestTrialCompletionApi: - def test_not_completion_app(self, app): + def test_not_completion_app(self, app: Flask): api = module.TrialCompletionApi() method = unwrap(api.post) @@ -560,7 +561,7 @@ class TestTrialCompletionApi: class TestTrialMessageSuggestedQuestionApi: - def test_not_chat_app(self, app): + def test_not_chat_app(self, app: Flask): api = module.TrialMessageSuggestedQuestionApi() method = unwrap(api.get) @@ -952,7 +953,7 @@ class TestTrialAppWorkflowTaskStopApi: class TestTrialSitApi: - def test_no_site(self, app): + def test_no_site(self, app: Flask): api = module.TrialSitApi() method = unwrap(api.get) app_model = MagicMock() @@ -963,7 +964,7 @@ class TestTrialSitApi: with pytest.raises(Forbidden): method(api, app_model) - def test_archived_tenant(self, app): + def test_archived_tenant(self, app: Flask): api = module.TrialSitApi() method = unwrap(api.get) @@ -978,7 +979,7 @@ class TestTrialSitApi: with pytest.raises(Forbidden): method(api, app_model) - def test_success(self, app): + def test_success(self, app: Flask): api = module.TrialSitApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/tag/test_tags.py b/api/tests/unit_tests/controllers/console/tag/test_tags.py index 6405558bb4..8b47da25fb 100644 --- a/api/tests/unit_tests/controllers/console/tag/test_tags.py +++ b/api/tests/unit_tests/controllers/console/tag/test_tags.py @@ -8,10 +8,8 @@ from 
werkzeug.exceptions import Forbidden import controllers.console.tag.tags as module from controllers.console import console_ns from controllers.console.tag.tags import ( - DeprecatedTagBindingCreateApi, - DeprecatedTagBindingRemoveApi, TagBindingCollectionApi, - TagBindingItemApi, + TagBindingRemoveApi, TagListApi, TagUpdateDeleteApi, ) @@ -75,7 +73,7 @@ def payload_patch(): class TestTagListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = TagListApi() method = unwrap(api.get) @@ -126,7 +124,7 @@ class TestTagListApi: assert result["name"] == "test-tag" assert result["binding_count"] == "0" - def test_post_forbidden(self, app, readonly_user, payload_patch): + def test_post_forbidden(self, app: Flask, readonly_user, payload_patch): api = TagListApi() method = unwrap(api.post) @@ -172,7 +170,7 @@ class TestTagUpdateDeleteApi: assert status == 200 assert result["binding_count"] == "3" - def test_patch_forbidden(self, app, readonly_user, payload_patch): + def test_patch_forbidden(self, app: Flask, readonly_user, payload_patch): api = TagUpdateDeleteApi() method = unwrap(api.patch) @@ -233,7 +231,7 @@ class TestTagBindingCollectionApi: assert status == 200 assert result["result"] == "success" - def test_create_forbidden(self, app, readonly_user, payload_patch): + def test_create_forbidden(self, app: Flask, readonly_user, payload_patch): api = TagBindingCollectionApi() method = unwrap(api.post) @@ -249,39 +247,13 @@ class TestTagBindingCollectionApi: method(api) -class TestDeprecatedTagBindingCreateApi: - def test_create_success(self, app, admin_user, payload_patch): - api = DeprecatedTagBindingCreateApi() +class TestTagBindingRemoveApi: + def test_remove_success(self, app, admin_user, payload_patch): + api = TagBindingRemoveApi() method = unwrap(api.post) payload = { - "tag_ids": ["tag-1"], - "target_id": "target-1", - "type": "knowledge", - } - - with app.test_request_context("/", json=payload): - with ( - patch( - 
"controllers.console.tag.tags.current_account_with_tenant", - return_value=(admin_user, None), - ), - payload_patch(payload), - patch("controllers.console.tag.tags.TagService.save_tag_binding") as save_mock, - ): - result, status = method(api) - - save_mock.assert_called_once() - assert status == 200 - assert result["result"] == "success" - - -class TestTagBindingItemApi: - def test_delete_success(self, app, admin_user, payload_patch): - api = TagBindingItemApi() - method = unwrap(api.delete) - - payload = { + "tag_ids": ["tag-1", "tag-2"], "target_id": "target-1", "type": "knowledge", } @@ -295,57 +267,16 @@ class TestTagBindingItemApi: payload_patch(payload), patch("controllers.console.tag.tags.TagService.delete_tag_binding") as delete_mock, ): - result, status = method(api, "tag-1") + result, status = method(api) delete_mock.assert_called_once() delete_payload = delete_mock.call_args.args[0] - assert delete_payload.tag_id == "tag-1" - assert delete_payload.target_id == "target-1" - assert delete_payload.type == TagType.KNOWLEDGE + assert delete_payload.tag_ids == ["tag-1", "tag-2"] assert status == 200 assert result["result"] == "success" - def test_delete_forbidden(self, app, readonly_user): - api = TagBindingItemApi() - method = unwrap(api.delete) - - with app.test_request_context("/"): - with patch( - "controllers.console.tag.tags.current_account_with_tenant", - return_value=(readonly_user, None), - ): - with pytest.raises(Forbidden): - method(api, "tag-1") - - -class TestDeprecatedTagBindingRemoveApi: - def test_remove_success(self, app, admin_user, payload_patch): - api = DeprecatedTagBindingRemoveApi() - method = unwrap(api.post) - - payload = { - "tag_id": "tag-1", - "target_id": "target-1", - "type": "knowledge", - } - - with app.test_request_context("/", json=payload): - with ( - patch( - "controllers.console.tag.tags.current_account_with_tenant", - return_value=(admin_user, None), - ), - payload_patch(payload), - 
patch("controllers.console.tag.tags.TagService.delete_tag_binding") as delete_mock, - ): - result, status = method(api) - - delete_mock.assert_called_once() - assert status == 200 - assert result["result"] == "success" - - def test_remove_forbidden(self, app, readonly_user, payload_patch): - api = DeprecatedTagBindingRemoveApi() + def test_remove_forbidden(self, app: Flask, readonly_user, payload_patch): + api = TagBindingRemoveApi() method = unwrap(api.post) with app.test_request_context("/", json={}): @@ -371,32 +302,30 @@ class TestTagResponseModel: class TestTagBindingRouteMetadata: - def test_legacy_write_routes_are_marked_deprecated(self): - assert DeprecatedTagBindingCreateApi.post.__apidoc__["deprecated"] is True - assert DeprecatedTagBindingRemoveApi.post.__apidoc__["deprecated"] is True + def test_write_routes_are_not_deprecated(self): assert TagBindingCollectionApi.post.__apidoc__.get("deprecated") is not True - assert TagBindingItemApi.delete.__apidoc__.get("deprecated") is not True + assert TagBindingRemoveApi.post.__apidoc__.get("deprecated") is not True def test_write_routes_have_stable_operation_ids(self): assert TagBindingCollectionApi.post.__apidoc__["id"] == "create_tag_binding" - assert TagBindingItemApi.delete.__apidoc__["id"] == "delete_tag_binding" - assert DeprecatedTagBindingCreateApi.post.__apidoc__["id"] == "create_tag_binding_deprecated" - assert DeprecatedTagBindingRemoveApi.post.__apidoc__["id"] == "delete_tag_binding_deprecated" + assert TagBindingRemoveApi.post.__apidoc__["id"] == "remove_tag_bindings" - def test_canonical_and_legacy_write_routes_are_registered(self): + def test_write_routes_are_registered(self): route_map = { resource.__name__: urls for resource, urls, _route_doc, _kwargs in console_ns.resources if resource.__name__ in { "TagBindingCollectionApi", - "TagBindingItemApi", - "DeprecatedTagBindingCreateApi", - "DeprecatedTagBindingRemoveApi", + "TagBindingRemoveApi", } } assert route_map["TagBindingCollectionApi"] == 
("/tag-bindings",) - assert route_map["TagBindingItemApi"] == ("/tag-bindings/",) - assert route_map["DeprecatedTagBindingCreateApi"] == ("/tag-bindings/create",) - assert route_map["DeprecatedTagBindingRemoveApi"] == ("/tag-bindings/remove",) + assert route_map["TagBindingRemoveApi"] == ("/tag-bindings/remove",) + + def test_legacy_write_routes_are_not_registered(self): + urls = {url for _resource, resource_urls, _route_doc, _kwargs in console_ns.resources for url in resource_urls} + + assert "/tag-bindings/create" not in urls + assert "/tag-bindings/" not in urls diff --git a/api/tests/unit_tests/controllers/console/test_admin.py b/api/tests/unit_tests/controllers/console/test_admin.py index 16197fcd0c..27f332ac51 100644 --- a/api/tests/unit_tests/controllers/console/test_admin.py +++ b/api/tests/unit_tests/controllers/console/test_admin.py @@ -4,6 +4,7 @@ import uuid from unittest.mock import Mock, PropertyMock, patch import pytest +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound, Unauthorized from controllers.console.admin import ( @@ -18,7 +19,7 @@ from models.model import App, InstalledApp, RecommendedApp @pytest.fixture(autouse=True) -def bypass_only_edition_cloud(mocker): +def bypass_only_edition_cloud(mocker: MockerFixture): """ Bypass only_edition_cloud decorator by setting EDITION to "CLOUD". """ @@ -29,7 +30,7 @@ def bypass_only_edition_cloud(mocker): @pytest.fixture -def mock_admin_auth(mocker): +def mock_admin_auth(mocker: MockerFixture): """ Provide valid admin authentication for controller tests. 
""" @@ -44,7 +45,7 @@ def mock_admin_auth(mocker): @pytest.fixture -def mock_console_payload(mocker): +def mock_console_payload(mocker: MockerFixture): payload = { "app_id": str(uuid.uuid4()), "language": "en-US", @@ -62,7 +63,7 @@ def mock_console_payload(mocker): @pytest.fixture -def mock_banner_payload(mocker): +def mock_banner_payload(mocker: MockerFixture): mocker.patch( "flask_restx.namespace.Namespace.payload", new_callable=PropertyMock, @@ -78,7 +79,7 @@ def mock_banner_payload(mocker): @pytest.fixture -def mock_session_factory(mocker): +def mock_session_factory(mocker: MockerFixture): mock_session = Mock() mock_session.execute = Mock() mock_session.add = Mock() @@ -97,7 +98,7 @@ class TestDeleteExploreBannerApi: def setup_method(self): self.api = DeleteExploreBannerApi() - def test_delete_banner_not_found(self, mocker, mock_admin_auth): + def test_delete_banner_not_found(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -106,7 +107,7 @@ class TestDeleteExploreBannerApi: with pytest.raises(NotFound, match="is not found"): self.api.delete(uuid.uuid4()) - def test_delete_banner_success(self, mocker, mock_admin_auth): + def test_delete_banner_success(self, mocker: MockerFixture, mock_admin_auth): mock_banner = Mock() mocker.patch( @@ -126,7 +127,7 @@ class TestInsertExploreBannerApi: def setup_method(self): self.api = InsertExploreBannerApi() - def test_insert_banner_success(self, mocker, mock_admin_auth, mock_banner_payload): + def test_insert_banner_success(self, mocker: MockerFixture, mock_admin_auth, mock_banner_payload): mocker.patch("controllers.console.admin.db.session.add") mocker.patch("controllers.console.admin.db.session.commit") @@ -168,7 +169,7 @@ class TestInsertExploreAppApiDelete: def setup_method(self): self.api = InsertExploreAppApi() - def test_delete_when_not_in_explore(self, mocker, mock_admin_auth): + def 
test_delete_when_not_in_explore(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.session_factory.create_session", return_value=Mock( @@ -183,7 +184,7 @@ class TestInsertExploreAppApiDelete: assert status == 204 assert response["result"] == "success" - def test_delete_when_in_explore_with_trial_app(self, mocker, mock_admin_auth): + def test_delete_when_in_explore_with_trial_app(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app from explore that has a trial app.""" app_id = uuid.uuid4() @@ -225,7 +226,7 @@ class TestInsertExploreAppApiDelete: assert response["result"] == "success" assert mock_app.is_public is False - def test_delete_with_installed_apps(self, mocker, mock_admin_auth): + def test_delete_with_installed_apps(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app that has installed apps in other tenants.""" app_id = uuid.uuid4() @@ -270,7 +271,7 @@ class TestInsertExploreAppListApi: def setup_method(self): self.api = InsertExploreAppListApi() - def test_app_not_found(self, mocker, mock_admin_auth, mock_console_payload): + def test_app_not_found(self, mocker: MockerFixture, mock_admin_auth, mock_console_payload): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -281,7 +282,7 @@ class TestInsertExploreAppListApi: def test_create_recommended_app( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, ): @@ -318,7 +319,9 @@ class TestInsertExploreAppListApi: assert response["result"] == "success" assert mock_app.is_public is True - def test_update_recommended_app(self, mocker, mock_admin_auth, mock_console_payload, mock_session_factory): + def test_update_recommended_app( + self, mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory + ): mock_app = Mock(spec=App) mock_app.id = "app-id" mock_app.site = None @@ -344,7 +347,7 @@ class TestInsertExploreAppListApi: def 
test_site_data_overrides_payload( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -381,7 +384,7 @@ class TestInsertExploreAppListApi: def test_create_trial_app_when_can_trial_enabled( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -413,7 +416,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_with_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -450,7 +453,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_without_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, diff --git a/api/tests/unit_tests/controllers/console/test_feature.py b/api/tests/unit_tests/controllers/console/test_feature.py index d8debc1f2c..1711aede61 100644 --- a/api/tests/unit_tests/controllers/console/test_feature.py +++ b/api/tests/unit_tests/controllers/console/test_feature.py @@ -1,3 +1,4 @@ +from pytest_mock import MockerFixture from werkzeug.exceptions import Unauthorized @@ -11,7 +12,7 @@ def unwrap(func): class TestFeatureApi: - def test_get_tenant_features_success(self, mocker): + def test_get_tenant_features_success(self, mocker: MockerFixture): from controllers.console.feature import FeatureApi mocker.patch( @@ -32,7 +33,7 @@ class TestFeatureApi: class TestSystemFeatureApi: - def test_get_system_features_authenticated(self, mocker): + def test_get_system_features_authenticated(self, mocker: MockerFixture): """ current_user.is_authenticated == True """ @@ -56,7 +57,7 @@ class TestSystemFeatureApi: assert result == {"features": {"sys_feature": True}} - def test_get_system_features_unauthenticated(self, mocker): + def test_get_system_features_unauthenticated(self, mocker: MockerFixture): """ current_user.is_authenticated raises Unauthorized """ diff --git a/api/tests/unit_tests/controllers/console/test_files.py 
b/api/tests/unit_tests/controllers/console/test_files.py index 5df9daa7f8..eebc6f9d60 100644 --- a/api/tests/unit_tests/controllers/console/test_files.py +++ b/api/tests/unit_tests/controllers/console/test_files.py @@ -82,7 +82,7 @@ def mock_file_service(mock_db): class TestFileApiGet: - def test_get_upload_config(self, app): + def test_get_upload_config(self, app: Flask): api = FileApi() get_method = unwrap(api.get) @@ -290,7 +290,7 @@ class TestFilePreviewApi: class TestFileSupportTypeApi: - def test_get_supported_types(self, app): + def test_get_supported_types(self, app: Flask): api = FileSupportTypeApi() get_method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/test_workspace_account.py b/api/tests/unit_tests/controllers/console/test_workspace_account.py index 0b1a32581a..4b4f968c8f 100644 --- a/api/tests/unit_tests/controllers/console/test_workspace_account.py +++ b/api/tests/unit_tests/controllers/console/test_workspace_account.py @@ -58,7 +58,7 @@ class TestChangeEmailSend: mock_get_change_data, mock_current_account, mock_db, - app, + app: Flask, ): mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_account = _build_account("current@example.com", "acc1") @@ -107,7 +107,7 @@ class TestChangeEmailSend: mock_get_change_data, mock_current_account, mock_db, - app, + app: Flask, ): """GHSA-4q3w-q5mc-45rq: a phase-1 token must not unlock the new-email send step.""" from controllers.console.auth.error import InvalidTokenError @@ -155,7 +155,7 @@ class TestChangeEmailValidity: mock_reset_rate, mock_current_account, mock_db, - app, + app: Flask, ): mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_account = _build_account("user@example.com", "acc2") @@ -214,7 +214,7 @@ class TestChangeEmailValidity: mock_reset_rate, mock_current_account, mock_db, - app, + app: Flask, ): mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_current_account.return_value = 
(_build_account("old@example.com", "acc"), None) @@ -267,7 +267,7 @@ class TestChangeEmailValidity: mock_reset_rate, mock_current_account, mock_db, - app, + app: Flask, ): """A token whose phase marker is a string but not a known transition must be rejected.""" from controllers.console.auth.error import InvalidTokenError @@ -316,7 +316,7 @@ class TestChangeEmailValidity: mock_reset_rate, mock_current_account, mock_db, - app, + app: Flask, ): """A token minted without a phase marker (e.g. a hand-crafted token) must not validate.""" from controllers.console.auth.error import InvalidTokenError @@ -366,7 +366,7 @@ class TestChangeEmailReset: mock_send_notify, mock_current_account, mock_db, - app, + app: Flask, ): mock_features.return_value = SimpleNamespace(enable_change_email=True) current_user = _build_account("old@example.com", "acc3") @@ -418,7 +418,7 @@ class TestChangeEmailReset: mock_send_notify, mock_current_account, mock_db, - app, + app: Flask, ): """GHSA-4q3w-q5mc-45rq PoC: phase-1 token must not be usable against /reset.""" from controllers.console.auth.error import InvalidTokenError @@ -471,7 +471,7 @@ class TestChangeEmailReset: mock_send_notify, mock_current_account, mock_db, - app, + app: Flask, ): """A verified token for address A must not be replayed to change to address B.""" from controllers.console.auth.error import InvalidTokenError @@ -547,7 +547,7 @@ class TestAccountServiceSendChangeEmailEmail: class TestAccountDeletionFeedback: @patch("controllers.console.wraps.db") @patch("controllers.console.workspace.account.BillingService.update_account_deletion_feedback") - def test_should_normalize_feedback_email(self, mock_update, mock_db, app): + def test_should_normalize_feedback_email(self, mock_update, mock_db, app: Flask): with app.test_request_context( "/account/delete/feedback", method="POST", @@ -563,7 +563,7 @@ class TestCheckEmailUnique: @patch("controllers.console.wraps.db") 
@patch("controllers.console.workspace.account.AccountService.check_email_unique") @patch("controllers.console.workspace.account.AccountService.is_account_in_freeze") - def test_should_normalize_email(self, mock_is_freeze, mock_check_unique, mock_db, app): + def test_should_normalize_email(self, mock_is_freeze, mock_check_unique, mock_db, app: Flask): mock_is_freeze.return_value = False mock_check_unique.return_value = True diff --git a/api/tests/unit_tests/controllers/console/test_workspace_members.py b/api/tests/unit_tests/controllers/console/test_workspace_members.py index 811bf5b1e7..412d6a6c52 100644 --- a/api/tests/unit_tests/controllers/console/test_workspace_members.py +++ b/api/tests/unit_tests/controllers/console/test_workspace_members.py @@ -43,7 +43,7 @@ class TestMemberInviteEmailApi: mock_current_account, mock_invite_member, mock_get_features, - app, + app: Flask, ): mock_get_features.return_value = _build_feature_flags() mock_invite_member.return_value = "token-abc" diff --git a/api/tests/unit_tests/controllers/console/workspace/test_accounts.py b/api/tests/unit_tests/controllers/console/workspace/test_accounts.py index bbe9d09521..064726da05 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_accounts.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_accounts.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest +from flask import Flask from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -41,7 +42,7 @@ def unwrap(func): class TestAccountInitApi: - def test_init_success(self, app): + def test_init_success(self, app: Flask): api = AccountInitApi() method = unwrap(api.post) @@ -64,7 +65,7 @@ class TestAccountInitApi: assert resp["result"] == "success" - def test_init_already_initialized(self, app): + def test_init_already_initialized(self, app: Flask): api = AccountInitApi() method = unwrap(api.post) @@ -79,7 +80,7 @@ class TestAccountInitApi: class 
TestAccountProfileApi: - def test_get_profile_success(self, app): + def test_get_profile_success(self, app: Flask): api = AccountProfileApi() method = unwrap(api.get) @@ -140,7 +141,7 @@ class TestAccountUpdateApis: class TestAccountAvatarApiGet: """GET /account/avatar must not sign arbitrary upload_file IDs (IDOR).""" - def test_get_avatar_signed_url_when_upload_owned_by_current_account(self, app): + def test_get_avatar_signed_url_when_upload_owned_by_current_account(self, app: Flask): api = AccountAvatarApi() method = unwrap(api.get) @@ -172,7 +173,7 @@ class TestAccountAvatarApiGet: assert result == {"avatar_url": "https://signed/example"} sign_mock.assert_called_once_with(upload_file_id=file_id) - def test_get_avatar_not_found_when_upload_created_by_other_account_same_tenant(self, app): + def test_get_avatar_not_found_when_upload_created_by_other_account_same_tenant(self, app: Flask): api = AccountAvatarApi() method = unwrap(api.get) @@ -204,7 +205,7 @@ class TestAccountAvatarApiGet: sign_mock.assert_not_called() - def test_get_avatar_not_found_when_upload_belongs_to_other_tenant(self, app): + def test_get_avatar_not_found_when_upload_belongs_to_other_tenant(self, app: Flask): api = AccountAvatarApi() method = unwrap(api.get) @@ -236,7 +237,7 @@ class TestAccountAvatarApiGet: sign_mock.assert_not_called() - def test_get_avatar_https_pass_through_without_signing(self, app): + def test_get_avatar_https_pass_through_without_signing(self, app: Flask): api = AccountAvatarApi() method = unwrap(api.get) @@ -263,7 +264,7 @@ class TestAccountAvatarApiGet: class TestAccountPasswordApi: - def test_password_success(self, app): + def test_password_success(self, app: Flask): api = AccountPasswordApi() method = unwrap(api.post) @@ -292,7 +293,7 @@ class TestAccountPasswordApi: assert result["id"] == "u1" - def test_password_wrong_current(self, app): + def test_password_wrong_current(self, app: Flask): api = AccountPasswordApi() method = unwrap(api.post) @@ -317,7 +318,7 @@ 
class TestAccountPasswordApi: class TestAccountIntegrateApi: - def test_get_integrates(self, app): + def test_get_integrates(self, app: Flask): api = AccountIntegrateApi() method = unwrap(api.get) @@ -336,7 +337,7 @@ class TestAccountIntegrateApi: class TestAccountDeleteApi: - def test_delete_verify_success(self, app): + def test_delete_verify_success(self, app: Flask): api = AccountDeleteVerifyApi() method = unwrap(api.get) @@ -358,7 +359,7 @@ class TestAccountDeleteApi: assert result["result"] == "success" - def test_delete_invalid_code(self, app): + def test_delete_invalid_code(self, app: Flask): api = AccountDeleteApi() method = unwrap(api.post) @@ -379,7 +380,7 @@ class TestAccountDeleteApi: class TestChangeEmailApis: - def test_check_email_code_invalid(self, app): + def test_check_email_code_invalid(self, app: Flask): api = ChangeEmailCheckApi() method = unwrap(api.post) @@ -405,7 +406,7 @@ class TestChangeEmailApis: with pytest.raises(EmailCodeError): method(api) - def test_reset_email_already_used(self, app): + def test_reset_email_already_used(self, app: Flask): api = ChangeEmailResetApi() method = unwrap(api.post) @@ -427,7 +428,7 @@ class TestChangeEmailApis: class TestCheckEmailUniqueApi: - def test_email_unique_success(self, app): + def test_email_unique_success(self, app: Flask): api = CheckEmailUnique() method = unwrap(api.post) @@ -448,7 +449,7 @@ class TestCheckEmailUniqueApi: assert result["result"] == "success" - def test_email_in_freeze(self, app): + def test_email_in_freeze(self, app: Flask): api = CheckEmailUnique() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_agent_providers.py b/api/tests/unit_tests/controllers/console/workspace/test_agent_providers.py index b4e03f681d..eb0ca15d2e 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_agent_providers.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_agent_providers.py @@ -1,6 +1,7 @@ from unittest.mock import 
MagicMock, patch import pytest +from flask import Flask from controllers.console.error import AccountNotFound from controllers.console.workspace.agent_providers import ( @@ -16,7 +17,7 @@ def unwrap(func): class TestAgentProviderListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = AgentProviderListApi() method = unwrap(api.get) @@ -39,7 +40,7 @@ class TestAgentProviderListApi: assert result == providers - def test_get_empty_list(self, app): + def test_get_empty_list(self, app: Flask): api = AgentProviderListApi() method = unwrap(api.get) @@ -61,7 +62,7 @@ class TestAgentProviderListApi: assert result == [] - def test_get_account_not_found(self, app): + def test_get_account_not_found(self, app: Flask): api = AgentProviderListApi() method = unwrap(api.get) @@ -77,7 +78,7 @@ class TestAgentProviderListApi: class TestAgentProviderApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = AgentProviderApi() method = unwrap(api.get) @@ -101,7 +102,7 @@ class TestAgentProviderApi: assert result == provider_data - def test_get_provider_not_found(self, app): + def test_get_provider_not_found(self, app: Flask): api = AgentProviderApi() method = unwrap(api.get) @@ -124,7 +125,7 @@ class TestAgentProviderApi: assert result is None - def test_get_account_not_found(self, app): + def test_get_account_not_found(self, app: Flask): api = AgentProviderApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py b/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py index 0b3d7ef6d7..ed7b2d606f 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console import console_ns from controllers.console.workspace.endpoint import ( @@ -39,7 +40,7 
@@ def patch_current_account(user_and_tenant): @pytest.mark.usefixtures("patch_current_account") class TestEndpointCollectionApi: - def test_create_success(self, app): + def test_create_success(self, app: Flask): api = EndpointCollectionApi() method = unwrap(api.post) @@ -57,7 +58,7 @@ class TestEndpointCollectionApi: assert result["success"] is True - def test_create_permission_denied(self, app): + def test_create_permission_denied(self, app: Flask): api = EndpointCollectionApi() method = unwrap(api.post) @@ -77,7 +78,7 @@ class TestEndpointCollectionApi: with pytest.raises(ValueError): method(api) - def test_create_validation_error(self, app): + def test_create_validation_error(self, app: Flask): api = EndpointCollectionApi() method = unwrap(api.post) @@ -96,7 +97,7 @@ class TestEndpointCollectionApi: @pytest.mark.usefixtures("patch_current_account") class TestDeprecatedEndpointCreateApi: - def test_create_success(self, app): + def test_create_success(self, app: Flask): api = DeprecatedEndpointCreateApi() method = unwrap(api.post) @@ -117,7 +118,7 @@ class TestDeprecatedEndpointCreateApi: @pytest.mark.usefixtures("patch_current_account") class TestEndpointListApi: - def test_list_success(self, app): + def test_list_success(self, app: Flask): api = EndpointListApi() method = unwrap(api.get) @@ -130,7 +131,7 @@ class TestEndpointListApi: assert "endpoints" in result assert len(result["endpoints"]) == 1 - def test_list_invalid_query(self, app): + def test_list_invalid_query(self, app: Flask): api = EndpointListApi() method = unwrap(api.get) @@ -143,7 +144,7 @@ class TestEndpointListApi: @pytest.mark.usefixtures("patch_current_account") class TestEndpointListForSinglePluginApi: - def test_list_for_plugin_success(self, app): + def test_list_for_plugin_success(self, app: Flask): api = EndpointListForSinglePluginApi() method = unwrap(api.get) @@ -158,7 +159,7 @@ class TestEndpointListForSinglePluginApi: assert "endpoints" in result - def 
test_list_for_plugin_missing_param(self, app): + def test_list_for_plugin_missing_param(self, app: Flask): api = EndpointListForSinglePluginApi() method = unwrap(api.get) @@ -171,7 +172,7 @@ class TestEndpointListForSinglePluginApi: @pytest.mark.usefixtures("patch_current_account") class TestEndpointItemApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = EndpointItemApi() method = unwrap(api.delete) @@ -187,7 +188,7 @@ class TestEndpointItemApi: assert result["success"] is True mock_delete.assert_called_once_with(tenant_id="t1", user_id="u1", endpoint_id="e1") - def test_delete_service_failure(self, app): + def test_delete_service_failure(self, app: Flask): api = EndpointItemApi() method = unwrap(api.delete) @@ -199,7 +200,7 @@ class TestEndpointItemApi: assert result["success"] is False - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = EndpointItemApi() method = unwrap(api.patch) @@ -226,7 +227,7 @@ class TestEndpointItemApi: settings={"x": 1}, ) - def test_update_validation_error(self, app): + def test_update_validation_error(self, app: Flask): api = EndpointItemApi() method = unwrap(api.patch) @@ -238,7 +239,7 @@ class TestEndpointItemApi: with pytest.raises(ValueError): method(api, "e1") - def test_update_service_failure(self, app): + def test_update_service_failure(self, app: Flask): api = EndpointItemApi() method = unwrap(api.patch) @@ -258,7 +259,7 @@ class TestEndpointItemApi: @pytest.mark.usefixtures("patch_current_account") class TestDeprecatedEndpointDeleteApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DeprecatedEndpointDeleteApi() method = unwrap(api.post) @@ -272,7 +273,7 @@ class TestDeprecatedEndpointDeleteApi: assert result["success"] is True - def test_delete_invalid_payload(self, app): + def test_delete_invalid_payload(self, app: Flask): api = DeprecatedEndpointDeleteApi() method = unwrap(api.post) @@ -282,7 +283,7 @@ 
class TestDeprecatedEndpointDeleteApi: with pytest.raises(ValueError): method(api) - def test_delete_service_failure(self, app): + def test_delete_service_failure(self, app: Flask): api = DeprecatedEndpointDeleteApi() method = unwrap(api.post) @@ -299,7 +300,7 @@ class TestDeprecatedEndpointDeleteApi: @pytest.mark.usefixtures("patch_current_account") class TestDeprecatedEndpointUpdateApi: - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = DeprecatedEndpointUpdateApi() method = unwrap(api.post) @@ -317,7 +318,7 @@ class TestDeprecatedEndpointUpdateApi: assert result["success"] is True - def test_update_validation_error(self, app): + def test_update_validation_error(self, app: Flask): api = DeprecatedEndpointUpdateApi() method = unwrap(api.post) @@ -329,7 +330,7 @@ class TestDeprecatedEndpointUpdateApi: with pytest.raises(ValueError): method(api) - def test_update_service_failure(self, app): + def test_update_service_failure(self, app: Flask): api = DeprecatedEndpointUpdateApi() method = unwrap(api.post) @@ -380,7 +381,7 @@ class TestEndpointRouteMetadata: @pytest.mark.usefixtures("patch_current_account") class TestEndpointEnableApi: - def test_enable_success(self, app): + def test_enable_success(self, app: Flask): api = EndpointEnableApi() method = unwrap(api.post) @@ -394,7 +395,7 @@ class TestEndpointEnableApi: assert result["success"] is True - def test_enable_invalid_payload(self, app): + def test_enable_invalid_payload(self, app: Flask): api = EndpointEnableApi() method = unwrap(api.post) @@ -404,7 +405,7 @@ class TestEndpointEnableApi: with pytest.raises(ValueError): method(api) - def test_enable_service_failure(self, app): + def test_enable_service_failure(self, app: Flask): api = EndpointEnableApi() method = unwrap(api.post) @@ -421,7 +422,7 @@ class TestEndpointEnableApi: @pytest.mark.usefixtures("patch_current_account") class TestEndpointDisableApi: - def test_disable_success(self, app): + def 
test_disable_success(self, app: Flask): api = EndpointDisableApi() method = unwrap(api.post) @@ -435,7 +436,7 @@ class TestEndpointDisableApi: assert result["success"] is True - def test_disable_invalid_payload(self, app): + def test_disable_invalid_payload(self, app: Flask): api = EndpointDisableApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_members.py b/api/tests/unit_tests/controllers/console/workspace/test_members.py index 718b57ba6b..0788ff603c 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_members.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_members.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import HTTPException import services @@ -34,7 +35,7 @@ def unwrap(func): class TestMemberListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = MemberListApi() method = unwrap(api.get) @@ -59,7 +60,7 @@ class TestMemberListApi: assert status == 200 assert len(result["accounts"]) == 1 - def test_get_no_tenant(self, app): + def test_get_no_tenant(self, app: Flask): api = MemberListApi() method = unwrap(api.get) @@ -74,7 +75,7 @@ class TestMemberListApi: class TestMemberInviteEmailApi: - def test_invite_success(self, app): + def test_invite_success(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -101,7 +102,7 @@ class TestMemberInviteEmailApi: assert status == 201 assert result["result"] == "success" - def test_invite_limit_exceeded(self, app): + def test_invite_limit_exceeded(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -123,7 +124,7 @@ class TestMemberInviteEmailApi: with pytest.raises(WorkspaceMembersLimitExceeded): method(api) - def test_invite_already_member(self, app): + def test_invite_already_member(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -151,7 +152,7 @@ 
class TestMemberInviteEmailApi: assert result["invitation_results"][0]["status"] == "success" - def test_invite_invalid_role(self, app): + def test_invite_invalid_role(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -166,7 +167,7 @@ class TestMemberInviteEmailApi: assert status == 400 assert result["code"] == "invalid-role" - def test_invite_generic_exception(self, app): + def test_invite_generic_exception(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -196,7 +197,7 @@ class TestMemberInviteEmailApi: class TestMemberCancelInviteApi: - def test_cancel_success(self, app): + def test_cancel_success(self, app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -216,7 +217,7 @@ class TestMemberCancelInviteApi: assert status == 200 assert result["result"] == "success" - def test_cancel_not_found(self, app): + def test_cancel_not_found(self, app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -233,7 +234,7 @@ class TestMemberCancelInviteApi: with pytest.raises(HTTPException): method(api, "x") - def test_cancel_cannot_operate_self(self, app): + def test_cancel_cannot_operate_self(self, app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -255,7 +256,7 @@ class TestMemberCancelInviteApi: assert status == 400 - def test_cancel_no_permission(self, app): + def test_cancel_no_permission(self, app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -277,7 +278,7 @@ class TestMemberCancelInviteApi: assert status == 403 - def test_cancel_member_not_in_tenant(self, app): + def test_cancel_member_not_in_tenant(self, app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -301,7 +302,7 @@ class TestMemberCancelInviteApi: class TestMemberUpdateRoleApi: - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = MemberUpdateRoleApi() method = unwrap(api.put) @@ -324,7 +325,7 @@ class 
TestMemberUpdateRoleApi: assert result["result"] == "success" - def test_update_invalid_role(self, app): + def test_update_invalid_role(self, app: Flask): api = MemberUpdateRoleApi() method = unwrap(api.put) @@ -335,7 +336,7 @@ class TestMemberUpdateRoleApi: assert status == 400 - def test_update_member_not_found(self, app): + def test_update_member_not_found(self, app: Flask): api = MemberUpdateRoleApi() method = unwrap(api.put) @@ -354,7 +355,7 @@ class TestMemberUpdateRoleApi: class TestDatasetOperatorMemberListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetOperatorMemberListApi() method = unwrap(api.get) @@ -381,7 +382,7 @@ class TestDatasetOperatorMemberListApi: assert status == 200 assert len(result["accounts"]) == 1 - def test_get_no_tenant(self, app): + def test_get_no_tenant(self, app: Flask): api = DatasetOperatorMemberListApi() method = unwrap(api.get) @@ -396,7 +397,7 @@ class TestDatasetOperatorMemberListApi: class TestSendOwnerTransferEmailApi: - def test_send_success(self, app): + def test_send_success(self, app: Flask): api = SendOwnerTransferEmailApi() method = unwrap(api.post) @@ -419,7 +420,7 @@ class TestSendOwnerTransferEmailApi: assert result["result"] == "success" - def test_send_ip_limit(self, app): + def test_send_ip_limit(self, app: Flask): api = SendOwnerTransferEmailApi() method = unwrap(api.post) @@ -433,7 +434,7 @@ class TestSendOwnerTransferEmailApi: with pytest.raises(EmailSendIpLimitError): method(api) - def test_send_not_owner(self, app): + def test_send_not_owner(self, app: Flask): api = SendOwnerTransferEmailApi() method = unwrap(api.post) @@ -452,7 +453,7 @@ class TestSendOwnerTransferEmailApi: class TestOwnerTransferCheckApi: - def test_check_invalid_code(self, app): + def test_check_invalid_code(self, app: Flask): api = OwnerTransferCheckApi() method = unwrap(api.post) @@ -477,7 +478,7 @@ class TestOwnerTransferCheckApi: with pytest.raises(EmailCodeError): method(api) - def 
test_rate_limited(self, app): + def test_rate_limited(self, app: Flask): api = OwnerTransferCheckApi() method = unwrap(api.post) @@ -498,7 +499,7 @@ class TestOwnerTransferCheckApi: with pytest.raises(OwnerTransferLimitError): method(api) - def test_invalid_token(self, app): + def test_invalid_token(self, app: Flask): api = OwnerTransferCheckApi() method = unwrap(api.post) @@ -520,7 +521,7 @@ class TestOwnerTransferCheckApi: with pytest.raises(InvalidTokenError): method(api) - def test_invalid_email(self, app): + def test_invalid_email(self, app: Flask): api = OwnerTransferCheckApi() method = unwrap(api.post) @@ -547,7 +548,7 @@ class TestOwnerTransferCheckApi: class TestOwnerTransferApi: - def test_transfer_self(self, app): + def test_transfer_self(self, app: Flask): api = OwnerTransfer() method = unwrap(api.post) @@ -564,7 +565,7 @@ class TestOwnerTransferApi: with pytest.raises(CannotTransferOwnerToSelfError): method(api, "1") - def test_invalid_token(self, app): + def test_invalid_token(self, app: Flask): api = OwnerTransfer() method = unwrap(api.post) @@ -582,7 +583,7 @@ class TestOwnerTransferApi: with pytest.raises(InvalidTokenError): method(api, "2") - def test_member_not_in_tenant(self, app): + def test_member_not_in_tenant(self, app: Flask): api = OwnerTransfer() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_model_providers.py b/api/tests/unit_tests/controllers/console/workspace/test_model_providers.py index 168479af1e..e836a3cc55 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_model_providers.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_model_providers.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from pydantic_core import ValidationError from werkzeug.exceptions import Forbidden @@ -26,7 +27,7 @@ def unwrap(func): class TestModelProviderListApi: - def test_get_success(self, app): + def test_get_success(self, 
app: Flask): api = ModelProviderListApi() method = unwrap(api.get) @@ -47,7 +48,7 @@ class TestModelProviderListApi: class TestModelProviderCredentialApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.get) @@ -66,7 +67,7 @@ class TestModelProviderCredentialApi: assert "credentials" in result - def test_get_invalid_uuid(self, app): + def test_get_invalid_uuid(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.get) @@ -80,7 +81,7 @@ class TestModelProviderCredentialApi: with pytest.raises(ValidationError): method(api, provider="openai") - def test_post_create_success(self, app): + def test_post_create_success(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.post) @@ -102,7 +103,7 @@ class TestModelProviderCredentialApi: assert result["result"] == "success" assert status == 201 - def test_post_create_validation_error(self, app): + def test_post_create_validation_error(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.post) @@ -122,7 +123,7 @@ class TestModelProviderCredentialApi: with pytest.raises(ValueError): method(api, provider="openai") - def test_put_update_success(self, app): + def test_put_update_success(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.put) @@ -143,7 +144,7 @@ class TestModelProviderCredentialApi: assert result["result"] == "success" - def test_put_invalid_uuid(self, app): + def test_put_invalid_uuid(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.put) @@ -159,7 +160,7 @@ class TestModelProviderCredentialApi: with pytest.raises(ValidationError): method(api, provider="openai") - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.delete) @@ -183,7 +184,7 @@ class TestModelProviderCredentialApi: class TestModelProviderCredentialSwitchApi: - def 
test_switch_success(self, app): + def test_switch_success(self, app: Flask): api = ModelProviderCredentialSwitchApi() method = unwrap(api.post) @@ -204,7 +205,7 @@ class TestModelProviderCredentialSwitchApi: assert result["result"] == "success" - def test_switch_invalid_uuid(self, app): + def test_switch_invalid_uuid(self, app: Flask): api = ModelProviderCredentialSwitchApi() method = unwrap(api.post) @@ -222,7 +223,7 @@ class TestModelProviderCredentialSwitchApi: class TestModelProviderValidateApi: - def test_validate_success(self, app): + def test_validate_success(self, app: Flask): api = ModelProviderValidateApi() method = unwrap(api.post) @@ -243,7 +244,7 @@ class TestModelProviderValidateApi: assert result["result"] == "success" - def test_validate_failure(self, app): + def test_validate_failure(self, app: Flask): api = ModelProviderValidateApi() method = unwrap(api.post) @@ -266,7 +267,7 @@ class TestModelProviderValidateApi: class TestModelProviderIconApi: - def test_icon_success(self, app): + def test_icon_success(self, app: Flask): api = ModelProviderIconApi() with ( @@ -280,7 +281,7 @@ class TestModelProviderIconApi: assert response.mimetype == "image/png" - def test_icon_not_found(self, app): + def test_icon_not_found(self, app: Flask): api = ModelProviderIconApi() with ( @@ -295,7 +296,7 @@ class TestModelProviderIconApi: class TestPreferredProviderTypeUpdateApi: - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = PreferredProviderTypeUpdateApi() method = unwrap(api.post) @@ -316,7 +317,7 @@ class TestPreferredProviderTypeUpdateApi: assert result["result"] == "success" - def test_invalid_enum(self, app): + def test_invalid_enum(self, app: Flask): api = PreferredProviderTypeUpdateApi() method = unwrap(api.post) @@ -334,7 +335,7 @@ class TestPreferredProviderTypeUpdateApi: class TestModelProviderPaymentCheckoutUrlApi: - def test_checkout_success(self, app): + def test_checkout_success(self, app: Flask): api = 
ModelProviderPaymentCheckoutUrlApi() method = unwrap(api.get) @@ -359,7 +360,7 @@ class TestModelProviderPaymentCheckoutUrlApi: assert "url" in result - def test_invalid_provider(self, app): + def test_invalid_provider(self, app: Flask): api = ModelProviderPaymentCheckoutUrlApi() method = unwrap(api.get) @@ -367,7 +368,7 @@ class TestModelProviderPaymentCheckoutUrlApi: with pytest.raises(ValueError): method(api, provider="openai") - def test_permission_denied(self, app): + def test_permission_denied(self, app: Flask): api = ModelProviderPaymentCheckoutUrlApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_models.py b/api/tests/unit_tests/controllers/console/workspace/test_models.py index f0d32f81fb..3c4acbab44 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_models.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_models.py @@ -32,7 +32,7 @@ class TestDefaultModelApi: with ( app.test_request_context( "/", - query_string={"model_type": ModelType.LLM.value}, + query_string={"model_type": ModelType.LLM}, ), patch( "controllers.console.workspace.models.current_account_with_tenant", @@ -53,7 +53,7 @@ class TestDefaultModelApi: payload = { "model_settings": [ { - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "provider": "openai", "model": "gpt-4", } @@ -72,12 +72,12 @@ class TestDefaultModelApi: assert result["result"] == "success" - def test_get_returns_empty_when_no_default(self, app): + def test_get_returns_empty_when_no_default(self, app: Flask): api = DefaultModelApi() method = unwrap(api.get) with ( - app.test_request_context("/", query_string={"model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), patch("controllers.console.workspace.models.ModelProviderService") as service, ): @@ -113,7 +113,7 @@ 
class TestModelProviderModelApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "load_balancing": { "configs": [{"weight": 1}], "enabled": True, @@ -139,7 +139,7 @@ class TestModelProviderModelApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -154,7 +154,7 @@ class TestModelProviderModelApi: assert status == 204 - def test_get_models_returns_empty(self, app): + def test_get_models_returns_empty(self, app: Flask): api = ModelProviderModelApi() method = unwrap(api.get) @@ -180,7 +180,7 @@ class TestModelProviderModelCredentialApi: "/", query_string={ "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, }, ), patch( @@ -208,7 +208,7 @@ class TestModelProviderModelCredentialApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -224,12 +224,12 @@ class TestModelProviderModelCredentialApi: assert status == 201 - def test_get_empty_credentials(self, app): + def test_get_empty_credentials(self, app: Flask): api = ModelProviderModelCredentialApi() method = unwrap(api.get) with ( - app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), patch("controllers.console.workspace.models.ModelProviderService") as service, patch("controllers.console.workspace.models.ModelLoadBalancingService") as lb, @@ -242,13 +242,13 @@ class TestModelProviderModelCredentialApi: assert result["credentials"] == {} - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = ModelProviderModelCredentialApi() method = unwrap(api.delete) payload = { "model": "gpt", - "model_type": ModelType.LLM.value, + 
"model_type": ModelType.LLM, "credential_id": "123e4567-e89b-12d3-a456-426614174000", } @@ -269,7 +269,7 @@ class TestModelProviderModelCredentialSwitchApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credential_id": "abc", } @@ -293,7 +293,7 @@ class TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -314,7 +314,7 @@ class TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -337,7 +337,7 @@ class TestModelProviderModelValidateApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -360,7 +360,7 @@ class TestModelProviderModelValidateApi: payload = { "model": model_name, - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {}, } @@ -412,11 +412,11 @@ class TestParameterAndAvailableModels: ): service_mock.return_value.get_models_by_model_type.return_value = [] - result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert "data" in result - def test_empty_rules(self, app): + def test_empty_rules(self, app: Flask): api = ModelProviderModelParameterRuleApi() method = unwrap(api.get) @@ -431,7 +431,7 @@ class TestParameterAndAvailableModels: assert result["data"] == [] - def test_no_models(self, app): + def test_no_models(self, app: Flask): api = ModelProviderAvailableModelApi() method = unwrap(api.get) @@ -442,6 +442,6 @@ class TestParameterAndAvailableModels: ): service.return_value.get_models_by_model_type.return_value = [] - result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert result["data"] == [] diff --git a/api/tests/unit_tests/controllers/console/workspace/test_plugin.py b/api/tests/unit_tests/controllers/console/workspace/test_plugin.py index ce5fd1c466..d01bf7d668 
100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_plugin.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_plugin.py @@ -2,6 +2,7 @@ import io from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.datastructures import FileStorage from werkzeug.exceptions import Forbidden @@ -61,7 +62,7 @@ def tenant(): class TestPluginListLatestVersionsApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginListLatestVersionsApi() method = unwrap(api.post) @@ -77,7 +78,7 @@ class TestPluginListLatestVersionsApi: assert "versions" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginListLatestVersionsApi() method = unwrap(api.post) @@ -95,7 +96,7 @@ class TestPluginListLatestVersionsApi: class TestPluginDebuggingKeyApi: - def test_debugging_key_success(self, app): + def test_debugging_key_success(self, app: Flask): api = PluginDebuggingKeyApi() method = unwrap(api.get) @@ -108,7 +109,7 @@ class TestPluginDebuggingKeyApi: assert result["key"] == "k" - def test_debugging_key_error(self, app): + def test_debugging_key_error(self, app: Flask): api = PluginDebuggingKeyApi() method = unwrap(api.get) @@ -125,7 +126,7 @@ class TestPluginDebuggingKeyApi: class TestPluginListApi: - def test_plugin_list(self, app): + def test_plugin_list(self, app: Flask): api = PluginListApi() method = unwrap(api.get) @@ -142,7 +143,7 @@ class TestPluginListApi: class TestPluginIconApi: - def test_plugin_icon(self, app): + def test_plugin_icon(self, app: Flask): api = PluginIconApi() method = unwrap(api.get) @@ -156,7 +157,7 @@ class TestPluginIconApi: class TestPluginAssetApi: - def test_plugin_asset(self, app): + def test_plugin_asset(self, app: Flask): api = PluginAssetApi() method = unwrap(api.get) @@ -171,7 +172,7 @@ class TestPluginAssetApi: class TestPluginUploadFromPkgApi: - def test_upload_pkg_success(self, app): + def test_upload_pkg_success(self, 
app: Flask): api = PluginUploadFromPkgApi() method = unwrap(api.post) @@ -188,7 +189,7 @@ class TestPluginUploadFromPkgApi: assert result["ok"] is True - def test_upload_pkg_too_large(self, app): + def test_upload_pkg_too_large(self, app: Flask): api = PluginUploadFromPkgApi() method = unwrap(api.post) @@ -210,7 +211,7 @@ class TestPluginUploadFromPkgApi: class TestPluginInstallFromPkgApi: - def test_install_from_pkg(self, app): + def test_install_from_pkg(self, app: Flask): api = PluginInstallFromPkgApi() method = unwrap(api.post) @@ -229,7 +230,7 @@ class TestPluginInstallFromPkgApi: class TestPluginUninstallApi: - def test_uninstall(self, app): + def test_uninstall(self, app: Flask): api = PluginUninstallApi() method = unwrap(api.post) @@ -246,7 +247,7 @@ class TestPluginUninstallApi: class TestPluginChangePermissionApi: - def test_change_permission_forbidden(self, app): + def test_change_permission_forbidden(self, app: Flask): api = PluginChangePermissionApi() method = unwrap(api.post) @@ -264,7 +265,7 @@ class TestPluginChangePermissionApi: with pytest.raises(Forbidden): method(api) - def test_change_permission_success(self, app): + def test_change_permission_success(self, app: Flask): api = PluginChangePermissionApi() method = unwrap(api.post) @@ -286,7 +287,7 @@ class TestPluginChangePermissionApi: class TestPluginFetchPermissionApi: - def test_fetch_permission_default(self, app): + def test_fetch_permission_default(self, app: Flask): api = PluginFetchPermissionApi() method = unwrap(api.get) @@ -319,7 +320,7 @@ class TestPluginFetchDynamicSelectOptionsApi: class TestPluginReadmeApi: - def test_fetch_readme(self, app): + def test_fetch_readme(self, app: Flask): api = PluginReadmeApi() method = unwrap(api.get) @@ -334,7 +335,7 @@ class TestPluginReadmeApi: class TestPluginListInstallationsFromIdsApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginListInstallationsFromIdsApi() method = unwrap(api.post) @@ -352,7 +353,7 @@ class 
TestPluginListInstallationsFromIdsApi: assert "plugins" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginListInstallationsFromIdsApi() method = unwrap(api.post) @@ -371,7 +372,7 @@ class TestPluginListInstallationsFromIdsApi: class TestPluginUploadFromGithubApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginUploadFromGithubApi() method = unwrap(api.post) @@ -388,7 +389,7 @@ class TestPluginUploadFromGithubApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginUploadFromGithubApi() method = unwrap(api.post) @@ -407,7 +408,7 @@ class TestPluginUploadFromGithubApi: class TestPluginUploadFromBundleApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginUploadFromBundleApi() method = unwrap(api.post) @@ -430,7 +431,7 @@ class TestPluginUploadFromBundleApi: assert result["ok"] is True - def test_too_large(self, app): + def test_too_large(self, app: Flask): api = PluginUploadFromBundleApi() method = unwrap(api.post) @@ -458,7 +459,7 @@ class TestPluginUploadFromBundleApi: class TestPluginInstallFromGithubApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginInstallFromGithubApi() method = unwrap(api.post) @@ -478,7 +479,7 @@ class TestPluginInstallFromGithubApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginInstallFromGithubApi() method = unwrap(api.post) @@ -502,7 +503,7 @@ class TestPluginInstallFromGithubApi: class TestPluginInstallFromMarketplaceApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginInstallFromMarketplaceApi() method = unwrap(api.post) @@ -520,7 +521,7 @@ class TestPluginInstallFromMarketplaceApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = 
PluginInstallFromMarketplaceApi() method = unwrap(api.post) @@ -539,7 +540,7 @@ class TestPluginInstallFromMarketplaceApi: class TestPluginFetchMarketplacePkgApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchMarketplacePkgApi() method = unwrap(api.get) @@ -552,7 +553,7 @@ class TestPluginFetchMarketplacePkgApi: assert "manifest" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchMarketplacePkgApi() method = unwrap(api.get) @@ -569,7 +570,7 @@ class TestPluginFetchMarketplacePkgApi: class TestPluginFetchManifestApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchManifestApi() method = unwrap(api.get) @@ -585,7 +586,7 @@ class TestPluginFetchManifestApi: assert "manifest" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchManifestApi() method = unwrap(api.get) @@ -602,7 +603,7 @@ class TestPluginFetchManifestApi: class TestPluginFetchInstallTasksApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchInstallTasksApi() method = unwrap(api.get) @@ -615,7 +616,7 @@ class TestPluginFetchInstallTasksApi: assert "tasks" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchInstallTasksApi() method = unwrap(api.get) @@ -632,7 +633,7 @@ class TestPluginFetchInstallTasksApi: class TestPluginFetchInstallTaskApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchInstallTaskApi() method = unwrap(api.get) @@ -645,7 +646,7 @@ class TestPluginFetchInstallTaskApi: assert "task" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchInstallTaskApi() method = unwrap(api.get) @@ -662,7 +663,7 @@ class TestPluginFetchInstallTaskApi: class TestPluginDeleteInstallTaskApi: - def test_success(self, app): + def test_success(self, app: 
Flask): api = PluginDeleteInstallTaskApi() method = unwrap(api.post) @@ -675,7 +676,7 @@ class TestPluginDeleteInstallTaskApi: assert result["success"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginDeleteInstallTaskApi() method = unwrap(api.post) @@ -692,7 +693,7 @@ class TestPluginDeleteInstallTaskApi: class TestPluginDeleteAllInstallTaskItemsApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginDeleteAllInstallTaskItemsApi() method = unwrap(api.post) @@ -707,7 +708,7 @@ class TestPluginDeleteAllInstallTaskItemsApi: assert result["success"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginDeleteAllInstallTaskItemsApi() method = unwrap(api.post) @@ -724,7 +725,7 @@ class TestPluginDeleteAllInstallTaskItemsApi: class TestPluginDeleteInstallTaskItemApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginDeleteInstallTaskItemApi() method = unwrap(api.post) @@ -737,7 +738,7 @@ class TestPluginDeleteInstallTaskItemApi: assert result["success"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginDeleteInstallTaskItemApi() method = unwrap(api.post) @@ -754,7 +755,7 @@ class TestPluginDeleteInstallTaskItemApi: class TestPluginUpgradeFromMarketplaceApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginUpgradeFromMarketplaceApi() method = unwrap(api.post) @@ -775,7 +776,7 @@ class TestPluginUpgradeFromMarketplaceApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginUpgradeFromMarketplaceApi() method = unwrap(api.post) @@ -797,7 +798,7 @@ class TestPluginUpgradeFromMarketplaceApi: class TestPluginUpgradeFromGithubApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginUpgradeFromGithubApi() method = unwrap(api.post) @@ -821,7 +822,7 @@ 
class TestPluginUpgradeFromGithubApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginUpgradeFromGithubApi() method = unwrap(api.post) @@ -846,7 +847,7 @@ class TestPluginUpgradeFromGithubApi: class TestPluginFetchDynamicSelectOptionsWithCredentialsApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchDynamicSelectOptionsWithCredentialsApi() method = unwrap(api.post) @@ -873,7 +874,7 @@ class TestPluginFetchDynamicSelectOptionsWithCredentialsApi: assert result["options"] == [1] - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchDynamicSelectOptionsWithCredentialsApi() method = unwrap(api.post) @@ -901,7 +902,7 @@ class TestPluginFetchDynamicSelectOptionsWithCredentialsApi: class TestPluginChangePreferencesApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginChangePreferencesApi() method = unwrap(api.post) @@ -931,7 +932,7 @@ class TestPluginChangePreferencesApi: assert result["success"] is True - def test_permission_fail(self, app): + def test_permission_fail(self, app: Flask): api = PluginChangePreferencesApi() method = unwrap(api.post) @@ -962,7 +963,7 @@ class TestPluginChangePreferencesApi: class TestPluginFetchPreferencesApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchPreferencesApi() method = unwrap(api.get) @@ -996,7 +997,7 @@ class TestPluginFetchPreferencesApi: class TestPluginAutoUpgradeExcludePluginApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginAutoUpgradeExcludePluginApi() method = unwrap(api.post) @@ -1011,7 +1012,7 @@ class TestPluginAutoUpgradeExcludePluginApi: assert result["success"] is True - def test_fail(self, app): + def test_fail(self, app: Flask): api = PluginAutoUpgradeExcludePluginApi() method = unwrap(api.post) diff --git 
a/api/tests/unit_tests/controllers/console/workspace/test_workspace.py b/api/tests/unit_tests/controllers/console/workspace/test_workspace.py index e82a29f045..a52518c2d2 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_workspace.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_workspace.py @@ -2,6 +2,7 @@ from io import BytesIO from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.datastructures import FileStorage from werkzeug.exceptions import Unauthorized @@ -37,7 +38,7 @@ def unwrap(func): class TestTenantListApi: - def test_get_success_saas_path(self, app): + def test_get_success_saas_path(self, app: Flask): api = TenantListApi() method = unwrap(api.get) @@ -85,7 +86,7 @@ class TestTenantListApi: get_plan_bulk_mock.assert_called_once_with(["t1", "t2"]) get_features_mock.assert_not_called() - def test_get_saas_path_partial_fallback_does_not_gate_plan_on_billing_enabled(self, app): + def test_get_saas_path_partial_fallback_does_not_gate_plan_on_billing_enabled(self, app: Flask): """Bulk omits a tenant: resolve plan via subscription.plan only; billing.enabled is not used. billing.enabled is mocked False to prove the endpoint does not gate on it for this path @@ -140,7 +141,7 @@ class TestTenantListApi: get_plan_bulk_mock.assert_called_once_with(["t1", "t2"]) get_features_mock.assert_called_once_with("t2") - def test_get_saas_path_falls_back_to_legacy_feature_path_on_bulk_error(self, app): + def test_get_saas_path_falls_back_to_legacy_feature_path_on_bulk_error(self, app: Flask): """Test fallback to FeatureService when bulk billing returns empty result. 
BillingService.get_plan_bulk catches exceptions internally and returns empty dict, @@ -197,7 +198,7 @@ class TestTenantListApi: assert get_features_mock.call_count == 2 logger_warning_mock.assert_called_once() - def test_get_billing_disabled_community_path(self, app): + def test_get_billing_disabled_community_path(self, app: Flask): api = TenantListApi() method = unwrap(api.get) @@ -236,7 +237,7 @@ class TestTenantListApi: assert result["workspaces"][0]["plan"] == CloudPlan.SANDBOX get_features_mock.assert_called_once_with("t1") - def test_get_enterprise_only_skips_feature_service(self, app): + def test_get_enterprise_only_skips_feature_service(self, app: Flask): api = TenantListApi() method = unwrap(api.get) @@ -276,7 +277,7 @@ class TestTenantListApi: assert result["workspaces"][1]["current"] is True get_features_mock.assert_not_called() - def test_get_enterprise_only_with_empty_tenants(self, app): + def test_get_enterprise_only_with_empty_tenants(self, app: Flask): api = TenantListApi() method = unwrap(api.get) @@ -302,7 +303,7 @@ class TestTenantListApi: class TestWorkspaceListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = WorkspaceListApi() method = unwrap(api.get) @@ -324,7 +325,7 @@ class TestWorkspaceListApi: assert result["total"] == 1 assert result["has_more"] is False - def test_get_has_next_true(self, app): + def test_get_has_next_true(self, app: Flask): api = WorkspaceListApi() method = unwrap(api.get) @@ -355,7 +356,7 @@ class TestWorkspaceListApi: class TestTenantApi: - def test_post_active_tenant(self, app): + def test_post_active_tenant(self, app: Flask): api = TenantApi() method = unwrap(api.post) @@ -375,7 +376,7 @@ class TestTenantApi: assert status == 200 assert result["id"] == "t1" - def test_post_archived_with_switch(self, app): + def test_post_archived_with_switch(self, app: Flask): api = TenantApi() method = unwrap(api.post) @@ -397,7 +398,7 @@ class TestTenantApi: assert result["id"] == "new" - def 
test_post_archived_no_tenant(self, app): + def test_post_archived_no_tenant(self, app: Flask): api = TenantApi() method = unwrap(api.post) @@ -411,7 +412,7 @@ class TestTenantApi: with pytest.raises(Unauthorized): method(api) - def test_post_info_path(self, app): + def test_post_info_path(self, app: Flask): api = TenantApi() method = unwrap(api.post) @@ -454,7 +455,7 @@ class TestTenantInfoResponse: class TestSwitchWorkspaceApi: - def test_switch_success(self, app): + def test_switch_success(self, app: Flask): api = SwitchWorkspaceApi() method = unwrap(api.post) @@ -477,7 +478,7 @@ class TestSwitchWorkspaceApi: assert result["result"] == "success" - def test_switch_not_linked(self, app): + def test_switch_not_linked(self, app: Flask): api = SwitchWorkspaceApi() method = unwrap(api.post) @@ -493,7 +494,7 @@ class TestSwitchWorkspaceApi: with pytest.raises(AccountNotLinkTenantError): method(api) - def test_switch_tenant_not_found(self, app): + def test_switch_tenant_not_found(self, app: Flask): api = SwitchWorkspaceApi() method = unwrap(api.post) @@ -515,7 +516,7 @@ class TestSwitchWorkspaceApi: class TestCustomConfigWorkspaceApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = CustomConfigWorkspaceApi() method = unwrap(api.post) @@ -538,7 +539,7 @@ class TestCustomConfigWorkspaceApi: assert result["result"] == "success" - def test_logo_fallback(self, app): + def test_logo_fallback(self, app: Flask): api = CustomConfigWorkspaceApi() method = unwrap(api.post) @@ -569,7 +570,7 @@ class TestCustomConfigWorkspaceApi: class TestWebappLogoWorkspaceApi: - def test_no_file(self, app): + def test_no_file(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -582,7 +583,7 @@ class TestWebappLogoWorkspaceApi: with pytest.raises(NoFileUploadedError): method(api) - def test_too_many_files(self, app): + def test_too_many_files(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -601,7 +602,7 
@@ class TestWebappLogoWorkspaceApi: with pytest.raises(TooManyFilesError): method(api) - def test_invalid_extension(self, app): + def test_invalid_extension(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -616,7 +617,7 @@ class TestWebappLogoWorkspaceApi: with pytest.raises(UnsupportedFileTypeError): method(api) - def test_upload_success(self, app): + def test_upload_success(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -648,7 +649,7 @@ class TestWebappLogoWorkspaceApi: assert status == 201 assert result["id"] == "file1" - def test_filename_missing(self, app): + def test_filename_missing(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -672,7 +673,7 @@ class TestWebappLogoWorkspaceApi: with pytest.raises(FilenameNotExistsError): method(api) - def test_file_too_large(self, app): + def test_file_too_large(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -701,7 +702,7 @@ class TestWebappLogoWorkspaceApi: with pytest.raises(FileTooLargeError): method(api) - def test_service_unsupported_file(self, app): + def test_service_unsupported_file(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -732,7 +733,7 @@ class TestWebappLogoWorkspaceApi: class TestWorkspaceInfoApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = WorkspaceInfoApi() method = unwrap(api.post) @@ -756,7 +757,7 @@ class TestWorkspaceInfoApi: assert result["result"] == "success" - def test_no_current_tenant(self, app): + def test_no_current_tenant(self, app: Flask): api = WorkspaceInfoApi() method = unwrap(api.post) @@ -774,7 +775,7 @@ class TestWorkspaceInfoApi: class TestWorkspacePermissionApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = WorkspacePermissionApi() method = unwrap(api.get) @@ -799,7 +800,7 @@ class TestWorkspacePermissionApi: assert status == 200 assert 
result["workspace_id"] == "t1" - def test_no_current_tenant(self, app): + def test_no_current_tenant(self, app: Flask): api = WorkspacePermissionApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py index d1b09c3a58..598677faff 100644 --- a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py +++ b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py @@ -189,7 +189,7 @@ class TestGetUserTenant: """Test get_user_tenant decorator""" @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch): + def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch): """Test that decorator injects tenant_model and user_model into kwargs""" # Arrange @@ -244,7 +244,9 @@ class TestGetUserTenant: protected_view() @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_use_default_session_id_when_user_id_empty(self, mock_tenant_class, app: Flask, monkeypatch): + def test_should_use_default_session_id_when_user_id_empty( + self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch + ): """Test that default session ID is used when user_id is empty string""" # Arrange diff --git a/api/tests/unit_tests/controllers/service_api/app/test_app.py b/api/tests/unit_tests/controllers/service_api/app/test_app.py index f5d93b5ac3..ae0edcf382 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_app.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_app.py @@ -41,7 +41,7 @@ class TestAppParameterApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_parameters_for_chat_app( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model 
+ self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test retrieving parameters for a chat app.""" # Arrange @@ -91,7 +91,7 @@ class TestAppParameterApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_parameters_for_workflow_app( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test retrieving parameters for a workflow app.""" # Arrange @@ -136,7 +136,7 @@ class TestAppParameterApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_parameters_raises_error_when_chat_config_missing( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test that AppUnavailableError is raised when chat app has no config.""" # Arrange @@ -174,7 +174,7 @@ class TestAppParameterApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_parameters_raises_error_when_workflow_missing( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test that AppUnavailableError is raised when workflow app has no workflow.""" # Arrange @@ -234,7 +234,14 @@ class TestAppMetaApi: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.app.app.AppService") def test_get_app_meta( - self, mock_app_service, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, + mock_app_service, + mock_db, + mock_validate_token, + 
mock_current_app, + mock_user_logged_in, + app: Flask, + mock_app_model, ): """Test retrieving app metadata via AppService.""" # Arrange @@ -310,7 +317,7 @@ class TestAppInfoApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_app_info( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test retrieving basic app information.""" mock_current_app.login_manager = Mock() @@ -402,7 +409,9 @@ class TestAppInfoApi: @patch("controllers.service_api.wraps.current_app") @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") - def test_get_app_info_with_no_tags(self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app): + def test_get_app_info_with_no_tags( + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask + ): """Test retrieving app info when app has no tags.""" # Arrange mock_current_app.login_manager = Mock() @@ -453,7 +462,7 @@ class TestAppInfoApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_app_info_returns_correct_mode( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, app_mode + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, app_mode ): """Test that all app modes are correctly returned.""" # Arrange diff --git a/api/tests/unit_tests/controllers/service_api/app/test_audio.py b/api/tests/unit_tests/controllers/service_api/app/test_audio.py index c16ebad739..4741481ef6 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_audio.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_audio.py @@ -13,6 +13,7 @@ from types import SimpleNamespace from unittest.mock 
import Mock, patch import pytest +from flask import Flask from werkzeug.datastructures import FileStorage from werkzeug.exceptions import InternalServerError @@ -190,7 +191,7 @@ class TestAudioServiceMockedBehavior: class TestAudioApi: - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr(AudioService, "transcript_asr", lambda **_kwargs: {"text": "ok"}) api = AudioApi() handler = _unwrap(api.post) @@ -216,7 +217,7 @@ class TestAudioApi: (InvokeError("invoke"), CompletionRequestError), ], ) - def test_error_mapping(self, app, monkeypatch: pytest.MonkeyPatch, exc, expected) -> None: + def test_error_mapping(self, app: Flask, monkeypatch: pytest.MonkeyPatch, exc, expected) -> None: monkeypatch.setattr(AudioService, "transcript_asr", lambda **_kwargs: (_ for _ in ()).throw(exc)) api = AudioApi() handler = _unwrap(api.post) @@ -227,7 +228,7 @@ class TestAudioApi: with pytest.raises(expected): handler(api, app_model=app_model, end_user=end_user) - def test_unhandled_error(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_unhandled_error(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( AudioService, "transcript_asr", lambda **_kwargs: (_ for _ in ()).throw(RuntimeError("boom")) ) @@ -242,7 +243,7 @@ class TestAudioApi: class TestTextApi: - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr(AudioService, "transcript_tts", lambda **_kwargs: {"audio": "ok"}) api = TextApi() @@ -259,7 +260,7 @@ class TestTextApi: assert response == {"audio": "ok"} - def test_error_mapping(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_error_mapping(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( AudioService, "transcript_tts", lambda **_kwargs: (_ for _ in 
()).throw(QuotaExceededError()) ) diff --git a/api/tests/unit_tests/controllers/service_api/app/test_completion.py b/api/tests/unit_tests/controllers/service_api/app/test_completion.py index 3364c07e62..259741937f 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_completion.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_completion.py @@ -16,6 +16,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from pydantic import ValidationError from werkzeug.exceptions import BadRequest, NotFound @@ -295,7 +296,7 @@ class TestCompletionControllerLogic: @patch("controllers.service_api.app.completion.service_api_ns") @patch("controllers.service_api.app.completion.AppGenerateService") - def test_completion_api_post_success(self, mock_generate_service, mock_service_api_ns, app): + def test_completion_api_post_success(self, mock_generate_service, mock_service_api_ns, app: Flask): """Test CompletionApi.post success path.""" from controllers.service_api.app.completion import CompletionApi @@ -320,7 +321,7 @@ class TestCompletionControllerLogic: mock_generate_service.generate.assert_called_once() @patch("controllers.service_api.app.completion.service_api_ns") - def test_completion_api_post_wrong_app_mode(self, mock_service_api_ns, app): + def test_completion_api_post_wrong_app_mode(self, mock_service_api_ns, app: Flask): """Test CompletionApi.post with wrong app mode.""" from controllers.service_api.app.completion import CompletionApi @@ -334,7 +335,7 @@ class TestCompletionControllerLogic: @patch("controllers.service_api.app.completion.service_api_ns") @patch("controllers.service_api.app.completion.AppGenerateService") - def test_chat_api_post_success(self, mock_generate_service, mock_service_api_ns, app): + def test_chat_api_post_success(self, mock_generate_service, mock_service_api_ns, app: Flask): """Test ChatApi.post success path.""" from controllers.service_api.app.completion import 
ChatApi @@ -355,7 +356,7 @@ class TestCompletionControllerLogic: assert response == {"text": "compacted"} @patch("controllers.service_api.app.completion.service_api_ns") - def test_chat_api_post_wrong_app_mode(self, mock_service_api_ns, app): + def test_chat_api_post_wrong_app_mode(self, mock_service_api_ns, app: Flask): """Test ChatApi.post with wrong app mode.""" from controllers.service_api.app.completion import ChatApi @@ -368,7 +369,7 @@ class TestCompletionControllerLogic: ChatApi().post.__wrapped__(ChatApi(), mock_app_model, mock_end_user) @patch("controllers.service_api.app.completion.AppTaskService") - def test_completion_stop_api_success(self, mock_task_service, app): + def test_completion_stop_api_success(self, mock_task_service, app: Flask): """Test CompletionStopApi.post success.""" from controllers.service_api.app.completion import CompletionStopApi @@ -385,7 +386,7 @@ class TestCompletionControllerLogic: mock_task_service.stop_task.assert_called_once() @patch("controllers.service_api.app.completion.AppTaskService") - def test_chat_stop_api_success(self, mock_task_service, app): + def test_chat_stop_api_success(self, mock_task_service, app: Flask): """Test ChatStopApi.post success.""" from controllers.service_api.app.completion import ChatStopApi diff --git a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py index 4fb8ecf784..74c13d50f6 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py @@ -20,6 +20,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, NotFound import services @@ -339,7 +340,7 @@ class TestConversationAppModeValidation: @pytest.mark.parametrize( "mode", [ - AppMode.CHAT.value, + AppMode.CHAT, AppMode.AGENT_CHAT.value, 
AppMode.ADVANCED_CHAT.value, ], @@ -364,7 +365,7 @@ class TestConversationAppModeValidation: app raises NotChatAppError. """ app = Mock(spec=App) - app.mode = AppMode.COMPLETION.value + app.mode = AppMode.COMPLETION app_mode = AppMode.value_of(app.mode) assert app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT} @@ -497,14 +498,14 @@ class TestConversationApiController: def test_list_not_chat(self, app) -> None: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations", method="GET"): with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user) - def test_list_last_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_list_last_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: class _BeginStub: def __enter__(self): return SimpleNamespace() @@ -530,7 +531,7 @@ class TestConversationApiController: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -545,14 +546,14 @@ class TestConversationDetailApiController: def test_delete_not_chat(self, app) -> None: api = ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations/1", method="DELETE"): with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user, c_id="00000000-0000-0000-0000-000000000001") - def test_delete_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_delete_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: 
monkeypatch.setattr( ConversationService, "delete", @@ -561,7 +562,7 @@ class TestConversationDetailApiController: api = ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context("/conversations/1", method="DELETE"): @@ -570,7 +571,7 @@ class TestConversationDetailApiController: class TestConversationRenameApiController: - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( ConversationService, "rename", @@ -579,7 +580,7 @@ class TestConversationRenameApiController: api = ConversationRenameApi() handler = _unwrap(api.post) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -595,14 +596,14 @@ class TestConversationVariablesApiController: def test_not_chat(self, app) -> None: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations/1/variables", method="GET"): with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user, c_id="00000000-0000-0000-0000-000000000001") - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( ConversationService, "get_conversational_variable", @@ -611,7 +612,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with 
app.test_request_context( @@ -621,7 +622,7 @@ class TestConversationVariablesApiController: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user, c_id="00000000-0000-0000-0000-000000000001") - def test_success_serializes_response(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success_serializes_response(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: created_at = datetime(2026, 1, 2, 3, 4, 5, tzinfo=UTC) monkeypatch.setattr( ConversationService, @@ -644,7 +645,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -661,7 +662,7 @@ class TestConversationVariablesApiController: class TestConversationVariableDetailApiController: - def test_update_type_mismatch(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_update_type_mismatch(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( ConversationService, "update_conversation_variable", @@ -670,7 +671,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -687,7 +688,7 @@ class TestConversationVariableDetailApiController: variable_id="00000000-0000-0000-0000-000000000002", ) - def test_update_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_update_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( ConversationService, "update_conversation_variable", @@ -696,7 +697,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = 
SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -713,7 +714,7 @@ class TestConversationVariableDetailApiController: variable_id="00000000-0000-0000-0000-000000000002", ) - def test_update_success_serializes_response(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_update_success_serializes_response(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: created_at = datetime(2026, 1, 2, 3, 4, 5, tzinfo=UTC) monkeypatch.setattr( ConversationService, @@ -730,7 +731,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( diff --git a/api/tests/unit_tests/controllers/service_api/app/test_file.py b/api/tests/unit_tests/controllers/service_api/app/test_file.py index 7060bd79df..2615c3edac 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_file.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_file.py @@ -16,6 +16,7 @@ import uuid from unittest.mock import Mock, patch import pytest +from flask import Flask from controllers.common.errors import ( FilenameNotExistsError, @@ -282,7 +283,7 @@ class TestFileApiPost: assert status == 201 mock_file_svc_cls.return_value.upload_file.assert_called_once() - def test_upload_no_file(self, app, mock_app_model, mock_end_user): + def test_upload_no_file(self, app: Flask, mock_app_model, mock_end_user): """Test NoFileUploadedError when no file in request.""" from controllers.service_api.app.file import FileApi @@ -296,7 +297,7 @@ class TestFileApiPost: with pytest.raises(NoFileUploadedError): _unwrap(api.post)(api, app_model=mock_app_model, end_user=mock_end_user) - def test_upload_too_many_files(self, app, mock_app_model, mock_end_user): + def 
test_upload_too_many_files(self, app: Flask, mock_app_model, mock_end_user): """Test TooManyFilesError when multiple files uploaded.""" from io import BytesIO @@ -317,7 +318,7 @@ class TestFileApiPost: with pytest.raises(TooManyFilesError): _unwrap(api.post)(api, app_model=mock_app_model, end_user=mock_end_user) - def test_upload_no_mimetype(self, app, mock_app_model, mock_end_user): + def test_upload_no_mimetype(self, app: Flask, mock_app_model, mock_end_user): """Test UnsupportedFileTypeError when file has no mimetype.""" from io import BytesIO diff --git a/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py b/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py index 846d5368f3..510d4a9470 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py @@ -11,6 +11,7 @@ from types import SimpleNamespace from unittest.mock import ANY, MagicMock, Mock import pytest +from flask import Flask import services.app_generate_service as ags_module from controllers.service_api.app.workflow_events import WorkflowEventsApi @@ -281,7 +282,7 @@ class TestHitlServiceApi: workflow_generator.convert_to_event_stream.assert_called_once_with(["raw-event"]) def test_workflow_events_snapshot_continue_on_pause_keeps_pause_open( - self, app, monkeypatch: pytest.MonkeyPatch + self, app: Flask, monkeypatch: pytest.MonkeyPatch ) -> None: workflow_run = SimpleNamespace( id="run-1", diff --git a/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py b/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py index 531f722ceb..dcb577f362 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py @@ -11,6 +11,7 @@ from unittest.mock import Mock import pytest from werkzeug.exceptions import NotFound +from 
controllers.common.human_input import HumanInputFormSubmitPayload from controllers.service_api.app.human_input_form import WorkflowHumanInputFormApi from models.human_input import RecipientType from tests.unit_tests.controllers.service_api.conftest import _unwrap @@ -145,6 +146,71 @@ class TestWorkflowHumanInputFormApi: submission_end_user_id="end-user-1", ) + def test_post_accepts_select_file_and_file_list_inputs(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + form = SimpleNamespace( + app_id="app-1", + tenant_id="tenant-1", + recipient_type=RecipientType.STANDALONE_WEB_APP, + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.post) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + end_user = SimpleNamespace(id="end-user-1") + inputs = { + "decision": "approve", + "attachment": { + "transfer_method": "local_file", + "upload_file_id": "4e0d1b87-52f2-49f6-b8c6-95cd9c954b3e", + "type": "document", + }, + "attachments": [ + { + "transfer_method": "local_file", + "upload_file_id": "1a77f0df-c0e6-461c-987c-e72526f341ee", + "type": "document", + }, + { + "transfer_method": "remote_url", + "url": "https://example.com/report.pdf", + "type": "document", + }, + ], + } + + with app.test_request_context( + "/form/human_input/token-1", + method="POST", + json={"inputs": inputs, "action": "approve", "user": "external-1"}, + ): + response, status = handler(api, app_model=app_model, end_user=end_user, form_token="token-1") + + assert response == {} + assert status == 200 + service_mock.submit_form_by_token.assert_called_once_with( + recipient_type=RecipientType.STANDALONE_WEB_APP, + form_token="token-1", + 
selected_action_id="approve", + form_data=inputs, + submission_end_user_id="end-user-1", + ) + + def test_submit_payload_schema_documents_select_file_and_file_list_inputs(self) -> None: + schema = HumanInputFormSubmitPayload.model_json_schema() + + inputs_schema = schema["properties"]["inputs"] + assert "select input" in inputs_schema["description"] + examples = inputs_schema["examples"] + assert examples[0]["decision"] == "approve" + assert examples[0]["attachment"]["transfer_method"] == "local_file" + assert examples[0]["attachment"]["upload_file_id"] == "4e0d1b87-52f2-49f6-b8c6-95cd9c954b3e" + assert examples[0]["attachments"][1]["transfer_method"] == "remote_url" + @pytest.mark.parametrize( "recipient_type", [ diff --git a/api/tests/unit_tests/controllers/service_api/app/test_message.py b/api/tests/unit_tests/controllers/service_api/app/test_message.py index c2b8aed1ae..2bc9771862 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_message.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_message.py @@ -19,6 +19,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, InternalServerError, NotFound from controllers.service_api.app.error import NotChatAppError @@ -390,7 +391,7 @@ class TestMessageListApi: with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user) - def test_conversation_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_conversation_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "pagination_by_first_id", @@ -409,7 +410,7 @@ class TestMessageListApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user) - def test_first_message_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_first_message_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> 
None: monkeypatch.setattr( MessageService, "pagination_by_first_id", @@ -430,7 +431,7 @@ class TestMessageListApi: class TestMessageFeedbackApi: - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "create_feedback", @@ -452,7 +453,7 @@ class TestMessageFeedbackApi: class TestAppGetFeedbacksApi: - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr(MessageService, "get_all_messages_feedbacks", lambda *_args, **_kwargs: ["f1"]) api = AppGetFeedbacksApi() @@ -476,7 +477,7 @@ class TestMessageSuggestedApi: with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user, message_id="m1") - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "get_suggested_questions_after_answer", @@ -492,7 +493,7 @@ class TestMessageSuggestedApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user, message_id="m1") - def test_disabled(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_disabled(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "get_suggested_questions_after_answer", @@ -508,7 +509,7 @@ class TestMessageSuggestedApi: with pytest.raises(BadRequest): handler(api, app_model=app_model, end_user=end_user, message_id="m1") - def test_internal_error(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_internal_error(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "get_suggested_questions_after_answer", @@ -524,7 +525,7 @@ class TestMessageSuggestedApi: with pytest.raises(InternalServerError): handler(api, 
app_model=app_model, end_user=end_user, message_id="m1") - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "get_suggested_questions_after_answer", diff --git a/api/tests/unit_tests/controllers/service_api/app/test_workflow.py b/api/tests/unit_tests/controllers/service_api/app/test_workflow.py index e1f75319dd..c9f09ac7ee 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_workflow.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_workflow.py @@ -20,6 +20,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, NotFound from controllers.service_api.app.error import NotWorkflowAppError @@ -386,7 +387,7 @@ class TestWorkflowRunRepository: class TestWorkflowRunDetailApi: - def test_not_workflow_app(self, app) -> None: + def test_not_workflow_app(self, app: Flask) -> None: api = WorkflowRunDetailApi() handler = _unwrap(api.get) app_model = SimpleNamespace(mode=AppMode.CHAT.value) @@ -417,7 +418,7 @@ class TestWorkflowRunDetailApi: class TestWorkflowRunApi: - def test_not_workflow_app(self, app) -> None: + def test_not_workflow_app(self, app: Flask) -> None: api = WorkflowRunApi() handler = _unwrap(api.post) app_model = SimpleNamespace(mode=AppMode.CHAT.value) @@ -427,7 +428,7 @@ class TestWorkflowRunApi: with pytest.raises(NotWorkflowAppError): handler(api, app_model=app_model, end_user=end_user) - def test_rate_limit(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_rate_limit(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( AppGenerateService, "generate", @@ -445,7 +446,7 @@ class TestWorkflowRunApi: class TestWorkflowRunByIdApi: - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: 
pytest.MonkeyPatch) -> None: monkeypatch.setattr( AppGenerateService, "generate", @@ -461,7 +462,7 @@ class TestWorkflowRunByIdApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user, workflow_id="w1") - def test_draft_workflow(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_draft_workflow(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( AppGenerateService, "generate", @@ -479,7 +480,7 @@ class TestWorkflowRunByIdApi: class TestWorkflowTaskStopApi: - def test_wrong_mode(self, app) -> None: + def test_wrong_mode(self, app: Flask) -> None: api = WorkflowTaskStopApi() handler = _unwrap(api.post) app_model = SimpleNamespace(mode=AppMode.CHAT.value) @@ -489,7 +490,7 @@ class TestWorkflowTaskStopApi: with pytest.raises(NotWorkflowAppError): handler(api, app_model=app_model, end_user=end_user, task_id="t1") - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: stop_mock = Mock() send_mock = Mock() monkeypatch.setattr(AppQueueManager, "set_stop_flag_no_user_check", stop_mock) @@ -509,7 +510,7 @@ class TestWorkflowTaskStopApi: class TestWorkflowAppLogApi: - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: class _BeginStub: def __enter__(self): return SimpleNamespace() @@ -577,7 +578,7 @@ class TestWorkflowRunDetailApiGet: self, mock_db, mock_repo_factory, - app, + app: Flask, mock_workflow_app, ): """Test successful workflow run detail retrieval.""" @@ -599,7 +600,7 @@ class TestWorkflowRunDetailApiGet: assert result["status"] == "succeeded" @patch("controllers.service_api.app.workflow.db") - def test_get_workflow_run_wrong_app_mode(self, mock_db, app): + def test_get_workflow_run_wrong_app_mode(self, mock_db, app: Flask): """Test NotWorkflowAppError when app mode is not workflow or advanced_chat.""" 
from controllers.service_api.app.workflow import WorkflowRunDetailApi @@ -624,7 +625,7 @@ class TestWorkflowTaskStopApiPost: self, mock_queue_mgr, mock_graph_mgr, - app, + app: Flask, mock_workflow_app, ): """Test successful workflow task stop.""" @@ -644,7 +645,7 @@ class TestWorkflowTaskStopApiPost: mock_graph_mgr.assert_called_once() mock_graph_mgr.return_value.send_stop_command.assert_called_once_with("task-1") - def test_stop_workflow_task_wrong_app_mode(self, app): + def test_stop_workflow_task_wrong_app_mode(self, app: Flask): """Test NotWorkflowAppError when app mode is not workflow.""" from controllers.service_api.app.workflow import WorkflowTaskStopApi @@ -669,7 +670,7 @@ class TestWorkflowAppLogApiGet: self, mock_db, mock_wf_svc_cls, - app, + app: Flask, mock_workflow_app, ): """Test successful workflow log retrieval.""" diff --git a/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py b/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py index f45a7f9632..b3edc2ecd8 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py @@ -9,6 +9,7 @@ from types import SimpleNamespace from unittest.mock import Mock import pytest +from flask import Flask from werkzeug.exceptions import NotFound from controllers.service_api.app.error import NotWorkflowAppError @@ -41,7 +42,7 @@ class TestWorkflowEventsApi: with pytest.raises(NotWorkflowAppError): handler(api, app_model=app_model, end_user=end_user, task_id="run-1") - def test_workflow_run_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_workflow_run_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: _mock_repo_for_run(monkeypatch, workflow_run=None) api = WorkflowEventsApi() handler = _unwrap(api.get) @@ -52,7 +53,7 @@ class TestWorkflowEventsApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user, 
task_id="run-1") - def test_workflow_run_permission_denied(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_workflow_run_permission_denied(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: workflow_run = SimpleNamespace( id="run-1", app_id="app-1", @@ -70,7 +71,7 @@ class TestWorkflowEventsApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user, task_id="run-1") - def test_finished_run_returns_sse(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_finished_run_returns_sse(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: workflow_run = SimpleNamespace( id="run-1", app_id="app-1", @@ -103,7 +104,7 @@ class TestWorkflowEventsApi: assert payload["task_id"] == "run-1" assert payload["event"] == "workflow_finished" - def test_running_run_streams_events(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_running_run_streams_events(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: workflow_run = SimpleNamespace( id="run-1", app_id="app-1", @@ -135,7 +136,7 @@ class TestWorkflowEventsApi: ) workflow_generator.convert_to_event_stream.assert_called_once_with(["raw-event"]) - def test_running_run_with_snapshot(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_running_run_with_snapshot(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: workflow_run = SimpleNamespace( id="run-1", app_id="app-1", diff --git a/api/tests/unit_tests/controllers/service_api/dataset/rag_pipeline/test_rag_pipeline_workflow.py b/api/tests/unit_tests/controllers/service_api/dataset/rag_pipeline/test_rag_pipeline_workflow.py index f33c482d04..362af883ed 100644 --- a/api/tests/unit_tests/controllers/service_api/dataset/rag_pipeline/test_rag_pipeline_workflow.py +++ b/api/tests/unit_tests/controllers/service_api/dataset/rag_pipeline/test_rag_pipeline_workflow.py @@ -23,6 +23,7 @@ from datetime import UTC, datetime from unittest.mock import Mock, patch import pytest +from flask import 
Flask from werkzeug.datastructures import FileStorage from werkzeug.exceptions import Forbidden, NotFound @@ -373,7 +374,7 @@ class TestDatasourcePluginsApiGet: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.RagPipelineService") - def test_get_plugins_success(self, mock_svc_cls, mock_db, app): + def test_get_plugins_success(self, mock_svc_cls, mock_db, app: Flask): """Test successful retrieval of datasource plugins.""" tenant_id = str(uuid.uuid4()) dataset_id = str(uuid.uuid4()) @@ -396,7 +397,7 @@ class TestDatasourcePluginsApiGet: ) @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") - def test_get_plugins_not_found(self, mock_db, app): + def test_get_plugins_not_found(self, mock_db, app: Flask): """Test NotFound when dataset check fails.""" mock_db.session.scalar.return_value = None @@ -407,7 +408,7 @@ class TestDatasourcePluginsApiGet: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.RagPipelineService") - def test_get_plugins_empty_list(self, mock_svc_cls, mock_db, app): + def test_get_plugins_empty_list(self, mock_svc_cls, mock_db, app: Flask): """Test empty plugin list.""" mock_db.session.scalar.return_value = Mock() mock_svc_instance = Mock() @@ -439,7 +440,7 @@ class TestDatasourceNodeRunApiPost: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.RagPipelineService") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.service_api_ns") - def test_post_success(self, mock_ns, mock_db, mock_svc_cls, mock_current_user, mock_gen, mock_helper, app): + def test_post_success(self, mock_ns, mock_db, mock_svc_cls, mock_current_user, mock_gen, mock_helper, app: Flask): """Test successful datasource node 
run.""" tenant_id = str(uuid.uuid4()) dataset_id = str(uuid.uuid4()) @@ -473,7 +474,7 @@ class TestDatasourceNodeRunApiPost: mock_svc_instance.run_datasource_workflow_node.assert_called_once() @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") - def test_post_not_found(self, mock_db, app): + def test_post_not_found(self, mock_db, app: Flask): """Test NotFound when dataset check fails.""" mock_db.session.scalar.return_value = None @@ -488,7 +489,7 @@ class TestDatasourceNodeRunApiPost: ) @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.service_api_ns") - def test_post_fails_when_current_user_not_account(self, mock_ns, mock_db, app): + def test_post_fails_when_current_user_not_account(self, mock_ns, mock_db, app: Flask): """Test AssertionError when current_user is not an Account instance.""" mock_db.session.scalar.return_value = Mock() mock_ns.payload = { @@ -549,7 +550,7 @@ class TestPipelineRunApiPost: mock_gen_svc.generate.assert_called_once() @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") - def test_post_not_found(self, mock_db, app): + def test_post_not_found(self, mock_db, app: Flask): """Test NotFound when dataset check fails.""" mock_db.session.scalar.return_value = None @@ -561,7 +562,7 @@ class TestPipelineRunApiPost: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.current_user", new="not_account") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.service_api_ns") - def test_post_forbidden_non_account_user(self, mock_ns, mock_db, app): + def test_post_forbidden_non_account_user(self, mock_ns, mock_db, app: Flask): """Test Forbidden when current_user is not an Account.""" mock_db.session.scalar.return_value = Mock() mock_ns.payload = { @@ -585,7 +586,7 @@ class 
TestFileUploadApiPost: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.FileService") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.current_user") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") - def test_upload_success(self, mock_db, mock_current_user, mock_file_svc_cls, app): + def test_upload_success(self, mock_db, mock_current_user, mock_file_svc_cls, app: Flask): """Test successful file upload.""" mock_current_user.__bool__ = Mock(return_value=True) @@ -621,7 +622,7 @@ class TestFileUploadApiPost: assert response["name"] == "doc.pdf" assert response["extension"] == "pdf" - def test_upload_no_file(self, app): + def test_upload_no_file(self, app: Flask): """Test error when no file is uploaded.""" with app.test_request_context( "/datasets/pipeline/file-upload", diff --git a/api/tests/unit_tests/controllers/service_api/dataset/test_dataset_segment.py b/api/tests/unit_tests/controllers/service_api/dataset/test_dataset_segment.py index e9c3e6d376..fe8fc02548 100644 --- a/api/tests/unit_tests/controllers/service_api/dataset/test_dataset_segment.py +++ b/api/tests/unit_tests/controllers/service_api/dataset/test_dataset_segment.py @@ -18,6 +18,7 @@ import uuid from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.exceptions import NotFound from controllers.service_api.dataset.segment import ( @@ -782,7 +783,7 @@ class TestSegmentApiGet: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -893,7 +894,7 @@ class TestSegmentApiPost: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -946,7 +947,7 @@ class TestSegmentApiPost: mock_db, mock_account_fn, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -989,7 +990,7 @@ class TestSegmentApiPost: mock_db, mock_account_fn, mock_doc_svc, - app, + app: Flask, mock_tenant, 
mock_dataset, ): @@ -1041,7 +1042,7 @@ class TestDatasetSegmentApiDelete: mock_doc_svc, mock_dataset_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -1086,7 +1087,7 @@ class TestDatasetSegmentApiDelete: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1128,7 +1129,7 @@ class TestDatasetSegmentApiDelete: mock_account_fn, mock_doc_svc, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1162,7 +1163,7 @@ class TestDatasetSegmentApiDelete: mock_account_fn, mock_dataset_svc, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1232,7 +1233,7 @@ class TestDatasetSegmentApiUpdate: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -1282,7 +1283,7 @@ class TestDatasetSegmentApiUpdate: mock_account_fn, mock_dataset_svc, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1322,7 +1323,7 @@ class TestDatasetSegmentApiUpdate: mock_dataset_svc, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1374,7 +1375,7 @@ class TestDatasetSegmentApiGetSingle: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -1421,7 +1422,7 @@ class TestDatasetSegmentApiGetSingle: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -1460,7 +1461,7 @@ class TestDatasetSegmentApiGetSingle: self, mock_db, mock_account_fn, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1491,7 +1492,7 @@ class TestDatasetSegmentApiGetSingle: mock_account_fn, mock_dataset_svc, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1526,7 +1527,7 @@ class TestDatasetSegmentApiGetSingle: mock_dataset_svc, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1570,7 +1571,7 @@ class TestChildChunkApiGet: mock_doc_svc, mock_seg_svc, 
mock_marshal, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1609,7 +1610,7 @@ class TestChildChunkApiGet: self, mock_db, mock_account_fn, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1638,7 +1639,7 @@ class TestChildChunkApiGet: mock_db, mock_account_fn, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1670,7 +1671,7 @@ class TestChildChunkApiGet: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1729,7 +1730,7 @@ class TestChildChunkApiPost: mock_doc_svc, mock_seg_svc, mock_marshal, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1771,7 +1772,7 @@ class TestChildChunkApiPost: mock_feature_svc, mock_db, mock_account_fn, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1809,7 +1810,7 @@ class TestChildChunkApiPost: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1863,7 +1864,7 @@ class TestDatasetChildChunkApiDelete: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1913,7 +1914,7 @@ class TestDatasetChildChunkApiDelete: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1954,7 +1955,7 @@ class TestDatasetChildChunkApiDelete: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1994,7 +1995,7 @@ class TestDatasetChildChunkApiDelete: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): diff --git a/api/tests/unit_tests/controllers/service_api/dataset/test_metadata.py b/api/tests/unit_tests/controllers/service_api/dataset/test_metadata.py index b93a1cf14b..b7e24f9201 100644 --- a/api/tests/unit_tests/controllers/service_api/dataset/test_metadata.py +++ b/api/tests/unit_tests/controllers/service_api/dataset/test_metadata.py @@ -19,6 +19,7 @@ import uuid from unittest.mock import Mock, patch import pytest +from flask import Flask 
from werkzeug.exceptions import NotFound from controllers.service_api.dataset.metadata import ( @@ -76,7 +77,7 @@ class TestDatasetMetadataCreatePost: mock_dataset_svc, mock_meta_svc, mock_marshal, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -106,7 +107,7 @@ class TestDatasetMetadataCreatePost: def test_create_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -136,7 +137,7 @@ class TestDatasetMetadataCreateGet: self, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -160,7 +161,7 @@ class TestDatasetMetadataCreateGet: def test_get_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -201,7 +202,7 @@ class TestDatasetMetadataServiceApiPatch: mock_dataset_svc, mock_meta_svc, mock_marshal, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -232,7 +233,7 @@ class TestDatasetMetadataServiceApiPatch: def test_update_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -273,7 +274,7 @@ class TestDatasetMetadataServiceApiDelete: mock_current_user, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -302,7 +303,7 @@ class TestDatasetMetadataServiceApiDelete: def test_delete_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -336,7 +337,7 @@ class TestDatasetMetadataBuiltInFieldGet: def test_get_built_in_fields_success( self, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -382,7 +383,7 @@ class TestDatasetMetadataBuiltInFieldAction: mock_current_user, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -414,7 +415,7 @@ class TestDatasetMetadataBuiltInFieldAction: mock_current_user, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -441,7 +442,7 @@ class 
TestDatasetMetadataBuiltInFieldAction: def test_action_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -485,7 +486,7 @@ class TestDocumentMetadataEditPost: mock_current_user, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -513,7 +514,7 @@ class TestDocumentMetadataEditPost: def test_update_documents_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): diff --git a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py index 3cc444e467..9c310a4f45 100644 --- a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py +++ b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py @@ -3,6 +3,7 @@ from unittest.mock import Mock from uuid import UUID, uuid4 import pytest +from pytest_mock import MockerFixture from controllers.service_api.end_user.end_user import EndUserApi from controllers.service_api.end_user.error import EndUserNotFoundError @@ -21,7 +22,9 @@ class TestEndUserApi: app.tenant_id = str(uuid4()) return app - def test_get_end_user_returns_all_attributes(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_returns_all_attributes( + self, mocker: MockerFixture, resource: EndUserApi, app_model: App + ) -> None: end_user = Mock(spec=EndUser) end_user.id = str(uuid4()) end_user.tenant_id = app_model.tenant_id @@ -54,7 +57,7 @@ class TestEndUserApi: assert result["created_at"].startswith("2024-01-01T00:00:00") assert result["updated_at"].startswith("2024-01-02T00:00:00") - def test_get_end_user_not_found(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_not_found(self, mocker: MockerFixture, resource: EndUserApi, app_model: App) -> None: mocker.patch("controllers.service_api.end_user.end_user.EndUserService.get_end_user_by_id", 
return_value=None) with pytest.raises(EndUserNotFoundError): diff --git a/api/tests/unit_tests/controllers/service_api/test_index.py b/api/tests/unit_tests/controllers/service_api/test_index.py index c560a3c698..8441118181 100644 --- a/api/tests/unit_tests/controllers/service_api/test_index.py +++ b/api/tests/unit_tests/controllers/service_api/test_index.py @@ -5,6 +5,7 @@ Unit tests for Service API Index endpoint from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.service_api.index import IndexApi @@ -13,7 +14,7 @@ class TestIndexApi: """Test suite for IndexApi resource.""" @patch("controllers.service_api.index.dify_config", autospec=True) - def test_get_returns_api_info(self, mock_config, app): + def test_get_returns_api_info(self, mock_config, app: Flask): """Test that GET returns API metadata with correct structure.""" # Arrange mock_config.project.version = "1.0.0-test" @@ -32,7 +33,7 @@ class TestIndexApi: assert response["api_version"] == "v1" assert response["server_version"] == "1.0.0-test" - def test_get_response_has_required_fields(self, app): + def test_get_response_has_required_fields(self, app: Flask): """Test that response contains all required fields.""" # Arrange mock_config = MagicMock() diff --git a/api/tests/unit_tests/controllers/service_api/test_wraps.py b/api/tests/unit_tests/controllers/service_api/test_wraps.py index 6dfbdcf98e..30d7b92913 100644 --- a/api/tests/unit_tests/controllers/service_api/test_wraps.py +++ b/api/tests/unit_tests/controllers/service_api/test_wraps.py @@ -39,7 +39,7 @@ class TestValidateAndGetApiToken: app.config["TESTING"] = True return app - def test_missing_authorization_header(self, app): + def test_missing_authorization_header(self, app: Flask): """Test that Unauthorized is raised when Authorization header is missing.""" # Arrange with app.test_request_context("/", method="GET"): @@ -50,7 +50,7 @@ class TestValidateAndGetApiToken: validate_and_get_api_token("app") 
assert "Authorization header must be provided" in str(exc_info.value) - def test_invalid_auth_scheme(self, app): + def test_invalid_auth_scheme(self, app: Flask): """Test that Unauthorized is raised when auth scheme is not Bearer.""" # Arrange with app.test_request_context("/", method="GET", headers={"Authorization": "Basic token123"}): @@ -62,7 +62,7 @@ class TestValidateAndGetApiToken: @patch("controllers.service_api.wraps.record_token_usage") @patch("controllers.service_api.wraps.ApiTokenCache") @patch("controllers.service_api.wraps.fetch_token_with_single_flight") - def test_valid_token_returns_api_token(self, mock_fetch_token, mock_cache_cls, mock_record_usage, app): + def test_valid_token_returns_api_token(self, mock_fetch_token, mock_cache_cls, mock_record_usage, app: Flask): """Test that valid token returns the ApiToken object.""" # Arrange mock_api_token = Mock(spec=ApiToken) @@ -84,7 +84,7 @@ class TestValidateAndGetApiToken: @patch("controllers.service_api.wraps.record_token_usage") @patch("controllers.service_api.wraps.ApiTokenCache") @patch("controllers.service_api.wraps.fetch_token_with_single_flight") - def test_invalid_token_raises_unauthorized(self, mock_fetch_token, mock_cache_cls, mock_record_usage, app): + def test_invalid_token_raises_unauthorized(self, mock_fetch_token, mock_cache_cls, mock_record_usage, app: Flask): """Test that invalid token raises Unauthorized.""" # Arrange from werkzeug.exceptions import Unauthorized @@ -161,7 +161,7 @@ class TestValidateAppToken: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.wraps.validate_and_get_api_token") - def test_app_not_found_raises_forbidden(self, mock_validate_token, mock_db, app): + def test_app_not_found_raises_forbidden(self, mock_validate_token, mock_db, app: Flask): """Test that Forbidden is raised when app no longer exists.""" # Arrange mock_api_token = Mock() @@ -182,7 +182,7 @@ class TestValidateAppToken: @patch("controllers.service_api.wraps.db") 
@patch("controllers.service_api.wraps.validate_and_get_api_token") - def test_app_status_abnormal_raises_forbidden(self, mock_validate_token, mock_db, app): + def test_app_status_abnormal_raises_forbidden(self, mock_validate_token, mock_db, app: Flask): """Test that Forbidden is raised when app status is abnormal.""" # Arrange mock_api_token = Mock() @@ -205,7 +205,7 @@ class TestValidateAppToken: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.wraps.validate_and_get_api_token") - def test_app_api_disabled_raises_forbidden(self, mock_validate_token, mock_db, app): + def test_app_api_disabled_raises_forbidden(self, mock_validate_token, mock_db, app: Flask): """Test that Forbidden is raised when app API is disabled.""" # Arrange mock_api_token = Mock() @@ -240,7 +240,7 @@ class TestCloudEditionBillingResourceCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_features") - def test_allows_when_under_limit(self, mock_get_features, mock_validate_token, app): + def test_allows_when_under_limit(self, mock_get_features, mock_validate_token, app: Flask): """Test that request is allowed when under resource limit.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -264,7 +264,7 @@ class TestCloudEditionBillingResourceCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_features") - def test_rejects_when_at_limit(self, mock_get_features, mock_validate_token, app): + def test_rejects_when_at_limit(self, mock_get_features, mock_validate_token, app: Flask): """Test that Forbidden is raised when at resource limit.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -287,7 +287,7 @@ class TestCloudEditionBillingResourceCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") 
@patch("controllers.service_api.wraps.FeatureService.get_features") - def test_allows_when_billing_disabled(self, mock_get_features, mock_validate_token, app): + def test_allows_when_billing_disabled(self, mock_get_features, mock_validate_token, app: Flask): """Test that request is allowed when billing is disabled.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -320,7 +320,7 @@ class TestCloudEditionBillingKnowledgeLimitCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_features") - def test_rejects_add_segment_in_sandbox(self, mock_get_features, mock_validate_token, app): + def test_rejects_add_segment_in_sandbox(self, mock_get_features, mock_validate_token, app: Flask): """Test that add_segment is rejected in SANDBOX plan.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -342,7 +342,7 @@ class TestCloudEditionBillingKnowledgeLimitCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_features") - def test_allows_other_operations_in_sandbox(self, mock_get_features, mock_validate_token, app): + def test_allows_other_operations_in_sandbox(self, mock_get_features, mock_validate_token, app: Flask): """Test that non-add_segment operations are allowed in SANDBOX.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -376,7 +376,7 @@ class TestCloudEditionBillingRateLimitCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_knowledge_rate_limit") - def test_allows_within_rate_limit(self, mock_get_rate_limit, mock_validate_token, app): + def test_allows_within_rate_limit(self, mock_get_rate_limit, mock_validate_token, app: Flask): """Test that request is allowed when within rate limit.""" # Arrange mock_validate_token.return_value = 
Mock(tenant_id="tenant123") @@ -406,7 +406,7 @@ class TestCloudEditionBillingRateLimitCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_knowledge_rate_limit") @patch("controllers.service_api.wraps.db") - def test_rejects_over_rate_limit(self, mock_db, mock_get_rate_limit, mock_validate_token, app): + def test_rejects_over_rate_limit(self, mock_db, mock_get_rate_limit, mock_validate_token, app: Flask): """Test that Forbidden is raised when over rate limit.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -445,7 +445,7 @@ class TestValidateDatasetToken: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.current_app") - def test_valid_dataset_token(self, mock_current_app, mock_validate_token, mock_db, mock_user_logged_in, app): + def test_valid_dataset_token(self, mock_current_app, mock_validate_token, mock_db, mock_user_logged_in, app: Flask): """Test that valid dataset token allows access.""" # Arrange # Use standard Mock for login_manager @@ -487,7 +487,7 @@ class TestValidateDatasetToken: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.wraps.validate_and_get_api_token") - def test_dataset_not_found_raises_not_found(self, mock_validate_token, mock_db, app): + def test_dataset_not_found_raises_not_found(self, mock_validate_token, mock_db, app: Flask): """Test that NotFound is raised when dataset doesn't exist.""" # Arrange mock_api_token = Mock() diff --git a/api/tests/unit_tests/controllers/web/test_human_input_file_upload.py b/api/tests/unit_tests/controllers/web/test_human_input_file_upload.py new file mode 100644 index 0000000000..5786748ba3 --- /dev/null +++ b/api/tests/unit_tests/controllers/web/test_human_input_file_upload.py @@ -0,0 +1,185 @@ +"""Unit tests for HITL human input file upload endpoints.""" + +from __future__ 
import annotations + +from datetime import datetime +from io import BytesIO +from types import SimpleNamespace +from unittest.mock import MagicMock + +import pytest +from flask import Flask + +import controllers.web.human_input_file_upload as upload_module +from controllers.common.errors import NoFileUploadedError +from controllers.web.human_input_file_upload import ( + HumanInputFileUploadApi, + HumanInputRemoteFileUploadApi, + InvalidUploadTokenForbiddenError, + InvalidUploadTokenUnauthorizedError, +) + + +@pytest.fixture +def app() -> Flask: + app = Flask(__name__) + app.config["TESTING"] = True + return app + + +def _upload_context() -> SimpleNamespace: + return SimpleNamespace( + form_id="form-1", + upload_token_id="token-row-1", + owner=SimpleNamespace(id="owner-1", current_tenant_id="tenant-1"), + ) + + +def _upload_file() -> SimpleNamespace: + return SimpleNamespace( + id="file-1", + name="sample.txt", + size=7, + extension="txt", + mime_type="text/plain", + created_by="end-user-1", + created_at=datetime(2024, 1, 1), + tenant_id="tenant-1", + source_url="signed-source-url", + ) + + +def test_local_upload_requires_authorization_before_reading_files(app: Flask) -> None: + data = {"file": (BytesIO(b"content"), "sample.txt")} + + with app.test_request_context( + "/api/form/human_input/files/upload", + method="POST", + data=data, + content_type="multipart/form-data", + ): + with pytest.raises(InvalidUploadTokenUnauthorizedError): + HumanInputFileUploadApi().post() + + +def test_local_upload_ignores_source_and_records_form_file_link(monkeypatch: pytest.MonkeyPatch, app: Flask) -> None: + service = MagicMock() + service.validate_upload_token.return_value = _upload_context() + monkeypatch.setattr(upload_module, "HumanInputFileUploadService", lambda engine: service) + + file_service = MagicMock() + file_service.upload_file.return_value = _upload_file() + file_service_cls = MagicMock(return_value=file_service) + monkeypatch.setattr(upload_module, "FileService", 
file_service_cls) + monkeypatch.setattr(upload_module, "db", SimpleNamespace(engine=object())) + + data = { + "file": (BytesIO(b"content"), "sample.txt"), + "source": "datasets", + } + with app.test_request_context( + "/api/form/human_input/files/upload", + method="POST", + headers={"Authorization": "bearer hitl_upload_token-1"}, + data=data, + content_type="multipart/form-data", + ): + result, status = HumanInputFileUploadApi().post() + + assert status == 201 + assert result["id"] == "file-1" + file_service.upload_file.assert_called_once() + assert file_service.upload_file.call_args.kwargs["source"] is None + assert file_service.upload_file.call_args.kwargs["user"].id == "owner-1" + service.record_upload_file.assert_called_once_with( + context=service.validate_upload_token.return_value, + file_id="file-1", + ) + + +def test_local_upload_missing_file_raises_after_valid_token(monkeypatch: pytest.MonkeyPatch, app: Flask) -> None: + service = MagicMock() + service.validate_upload_token.return_value = _upload_context() + monkeypatch.setattr(upload_module, "HumanInputFileUploadService", lambda engine: service) + monkeypatch.setattr(upload_module, "db", SimpleNamespace(engine=object())) + + with app.test_request_context( + "/api/form/human_input/files/upload", + method="POST", + headers={"Authorization": "bearer hitl_upload_token-1"}, + content_type="multipart/form-data", + ): + with pytest.raises(NoFileUploadedError): + HumanInputFileUploadApi().post() + + service.validate_upload_token.assert_called_once_with("hitl_upload_token-1") + + +def test_remote_upload_validates_token_before_fetching_remote_url(monkeypatch: pytest.MonkeyPatch, app: Flask) -> None: + service = MagicMock() + service.validate_upload_token.side_effect = InvalidUploadTokenForbiddenError() + monkeypatch.setattr(upload_module, "HumanInputFileUploadService", lambda engine: service) + monkeypatch.setattr(upload_module, "db", SimpleNamespace(engine=object())) + ssrf_proxy = MagicMock() + 
monkeypatch.setattr(upload_module, "ssrf_proxy", ssrf_proxy) + + with app.test_request_context( + "/api/form/human_input/files/remote-upload", + method="POST", + headers={"Authorization": "Bearer hitl_upload_token-1"}, + json={"url": "https://example.com/file.txt"}, + ): + with pytest.raises(InvalidUploadTokenForbiddenError): + HumanInputRemoteFileUploadApi().post() + + ssrf_proxy.head.assert_not_called() + ssrf_proxy.get.assert_not_called() + + +def test_remote_upload_records_form_file_link(monkeypatch: pytest.MonkeyPatch, app: Flask) -> None: + service = MagicMock() + service.validate_upload_token.return_value = _upload_context() + monkeypatch.setattr(upload_module, "HumanInputFileUploadService", lambda engine: service) + monkeypatch.setattr(upload_module, "db", SimpleNamespace(engine=object())) + + response = MagicMock() + response.status_code = 200 + response.content = b"remote" + response.request.method = "GET" + ssrf_proxy = MagicMock() + ssrf_proxy.head.return_value = response + monkeypatch.setattr(upload_module, "ssrf_proxy", ssrf_proxy) + monkeypatch.setattr( + upload_module.helpers, + "guess_file_info_from_response", + lambda _response: SimpleNamespace(filename="sample.txt", extension="txt", mimetype="text/plain", size=6), + ) + + file_service = MagicMock() + file_service.upload_file.return_value = _upload_file() + file_service_cls = MagicMock(return_value=file_service) + file_service_cls.is_file_size_within_limit.return_value = True + monkeypatch.setattr(upload_module, "FileService", file_service_cls) + monkeypatch.setattr( + upload_module.file_helpers, + "get_signed_file_url", + lambda upload_file_id: f"signed:{upload_file_id}", + ) + + with app.test_request_context( + "/api/form/human_input/files/remote-upload", + method="POST", + headers={"Authorization": "Bearer hitl_upload_token-1"}, + json={"url": "https://example.com/file.txt"}, + ): + result, status = HumanInputRemoteFileUploadApi().post() + + assert status == 201 + assert result["url"] == 
"signed:file-1" + file_service.upload_file.assert_called_once() + assert file_service.upload_file.call_args.kwargs["source_url"] == "https://example.com/file.txt" + assert file_service.upload_file.call_args.kwargs["user"].id == "owner-1" + service.record_upload_file.assert_called_once_with( + context=service.validate_upload_token.return_value, + file_id="file-1", + ) diff --git a/api/tests/unit_tests/controllers/web/test_human_input_form.py b/api/tests/unit_tests/controllers/web/test_human_input_form.py index 5f2dc19aab..4c5423f2fb 100644 --- a/api/tests/unit_tests/controllers/web/test_human_input_form.py +++ b/api/tests/unit_tests/controllers/web/test_human_input_form.py @@ -19,6 +19,7 @@ from models.human_input import RecipientType from services.human_input_service import FormExpiredError HumanInputFormApi = human_input_module.HumanInputFormApi +HumanInputFormUploadTokenApi = human_input_module.HumanInputFormUploadTokenApi TenantStatus = human_input_module.TenantStatus @@ -180,6 +181,35 @@ def test_get_form_includes_site(monkeypatch: pytest.MonkeyPatch, app: Flask): limiter_mock.increment_rate_limit.assert_called_once_with("203.0.113.10") +def test_create_upload_token_returns_token_and_form_expiration(monkeypatch: pytest.MonkeyPatch, app: Flask): + """POST returns a HITL upload token for an active form token.""" + + expiration_time = datetime(2099, 1, 1, tzinfo=UTC) + service_mock = MagicMock() + service_mock.issue_upload_token.return_value = SimpleNamespace( + upload_token="hitl_upload_token-1", + expires_at=expiration_time, + ) + monkeypatch.setattr(human_input_module, "HumanInputFileUploadService", lambda engine: service_mock) + monkeypatch.setattr(human_input_module, "db", SimpleNamespace(engine=object())) + + limiter_mock = MagicMock() + limiter_mock.is_rate_limited.return_value = False + monkeypatch.setattr(human_input_module, "_FORM_UPLOAD_TOKEN_RATE_LIMITER", limiter_mock) + monkeypatch.setattr(human_input_module, "extract_remote_ip", lambda req: 
"203.0.113.10") + + with app.test_request_context("/api/form/human_input/token-1/upload-token", method="POST"): + result, status = HumanInputFormUploadTokenApi().post("token-1") + + assert status == 200 + assert result == { + "upload_token": "hitl_upload_token-1", + "expires_at": int(expiration_time.timestamp()), + } + service_mock.issue_upload_token.assert_called_once_with("token-1") + limiter_mock.increment_rate_limit.assert_called_once_with("203.0.113.10") + + def test_get_form_allows_backstage_token(monkeypatch: pytest.MonkeyPatch, app: Flask): """GET returns form payload for backstage token.""" diff --git a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py index 9073ae1044..c1a4da8cd3 100644 --- a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py +++ b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py @@ -12,12 +12,13 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.output_parser.cot_output_parser import CotAgentOutputParser @pytest.fixture -def mock_action_class(mocker): +def mock_action_class(mocker: MockerFixture): mock_action = MagicMock() mocker.patch( "core.agent.output_parser.cot_output_parser.AgentScratchpadUnit.Action", diff --git a/api/tests/unit_tests/core/agent/strategy/test_plugin.py b/api/tests/unit_tests/core/agent/strategy/test_plugin.py index e0894f1e90..0fea04845d 100644 --- a/api/tests/unit_tests/core/agent/strategy/test_plugin.py +++ b/api/tests/unit_tests/core/agent/strategy/test_plugin.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.strategy.plugin import PluginAgentStrategy @@ -213,7 +214,9 @@ class TestInvoke: (None, None, "msg"), ], ) - def test_invoke_optional_arguments(self, strategy, mocker, conversation_id, app_id, message_id) 
-> None: + def test_invoke_optional_arguments( + self, strategy, mocker: MockerFixture, conversation_id, app_id, message_id + ) -> None: mock_manager = MagicMock() mock_manager.invoke = MagicMock(return_value=iter([])) diff --git a/api/tests/unit_tests/core/agent/test_base_agent_runner.py b/api/tests/unit_tests/core/agent/test_base_agent_runner.py index db4b293b16..d5fb853ee3 100644 --- a/api/tests/unit_tests/core/agent/test_base_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_base_agent_runner.py @@ -3,6 +3,7 @@ from decimal import Decimal from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.agent.base_agent_runner as module from core.agent.base_agent_runner import BaseAgentRunner @@ -13,7 +14,7 @@ from core.agent.base_agent_runner import BaseAgentRunner @pytest.fixture -def mock_db_session(mocker): +def mock_db_session(mocker: MockerFixture): session = mocker.MagicMock() mocker.patch.object(module.db, "session", session) return session @@ -41,13 +42,13 @@ def runner(mocker, mock_db_session): class TestRepack: - def test_sets_empty_if_none(self, runner, mocker): + def test_sets_empty_if_none(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = None result = runner._repack_app_generate_entity(entity) assert result.app_config.prompt_template.simple_prompt_template == "" - def test_keeps_existing(self, runner, mocker): + def test_keeps_existing(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = "abc" result = runner._repack_app_generate_entity(entity) @@ -60,7 +61,7 @@ class TestRepack: class TestUpdatePromptTool: - def build_param(self, mocker, **kwargs): + def build_param(self, mocker: MockerFixture, **kwargs): p = mocker.MagicMock() p.form = kwargs.get("form") @@ -75,7 +76,7 @@ class TestUpdatePromptTool: p.required = kwargs.get("required", False) return p - def 
test_skip_non_llm(self, runner, mocker): + def test_skip_non_llm(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form="NOT_LLM") tool.get_runtime_parameters.return_value = [param] @@ -86,7 +87,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_enum_and_required(self, runner, mocker): + def test_enum_and_required(self, runner, mocker: MockerFixture): option = mocker.MagicMock(value="opt1") param = self.build_param( mocker, @@ -104,7 +105,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert "p1" in result.parameters["required"] - def test_skip_file_type_param(self, runner, mocker): + def test_skip_file_type_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form=module.ToolParameter.ToolParameterForm.LLM) param.type = module.ToolParameter.ToolParameterType.FILE @@ -116,7 +117,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_duplicate_required_not_duplicated(self, runner, mocker): + def test_duplicate_required_not_duplicated(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param( @@ -141,7 +142,7 @@ class TestUpdatePromptTool: class TestCreateAgentThought: - def test_with_files(self, runner, mock_db_session, mocker): + def test_with_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = mocker.MagicMock(id=10) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -149,7 +150,7 @@ class TestCreateAgentThought: assert result == "10" assert runner.agent_thought_count == 1 - def test_without_files(self, runner, mock_db_session, mocker): + def test_without_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = 
mocker.MagicMock(id=11) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -163,7 +164,7 @@ class TestCreateAgentThought: class TestSaveAgentThought: - def setup_agent(self, mocker): + def setup_agent(self, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;tool2" agent.tool_labels = {} @@ -175,7 +176,7 @@ class TestSaveAgentThought: with pytest.raises(ValueError): runner.save_agent_thought("id", None, None, None, None, None, None, [], None) - def test_full_update(self, runner, mock_db_session, mocker): + def test_full_update(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -210,7 +211,7 @@ class TestSaveAgentThought: assert agent.tokens == 3 assert "tool1" in json.loads(agent.tool_labels_str) - def test_label_fallback_when_none(self, runner, mock_db_session, mocker): + def test_label_fallback_when_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) agent.tool = "unknown_tool" mock_db_session.scalar.return_value = agent @@ -220,7 +221,7 @@ class TestSaveAgentThought: labels = json.loads(agent.tool_labels_str) assert "unknown_tool" in labels - def test_json_failure_paths(self, runner, mock_db_session, mocker): + def test_json_failure_paths(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -241,13 +242,13 @@ class TestSaveAgentThought: assert mock_db_session.commit.called - def test_messages_ids_none(self, runner, mock_db_session, mocker): + def test_messages_ids_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent runner.save_agent_thought("id", None, None, None, None, None, None, None, None) assert mock_db_session.commit.called - def test_success_dict_serialization(self, runner, mock_db_session, mocker): + def 
test_success_dict_serialization(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -273,19 +274,19 @@ class TestSaveAgentThought: class TestOrganizeUserPrompt: - def test_no_files(self, runner, mock_db_session, mocker): + def test_no_files(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [] msg = mocker.MagicMock(id="1", query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_with_files_no_config(self, runner, mock_db_session, mocker): + def test_with_files_no_config(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] msg = mocker.MagicMock(id="1", query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_image_detail_low_fallback(self, runner, mock_db_session, mocker): + def test_image_detail_low_fallback(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() file_config.image_config = mocker.MagicMock(detail=None) @@ -305,27 +306,27 @@ class TestOrganizeUserPrompt: class TestOrganizeHistory: - def test_empty(self, runner, mock_db_session, mocker): + def test_empty(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) result = runner.organize_agent_history([]) assert result == [] - def test_with_answer_only(self, runner, mock_db_session, mocker): + def test_with_answer_only(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="m1", answer="ans", agent_thoughts=[], app_model_config=None) 
mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert any(isinstance(x, module.AssistantPromptMessage) for x in result) - def test_skip_current_message(self, runner, mock_db_session, mocker): + def test_skip_current_message(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="msg_current", agent_thoughts=[], answer="ans", app_model_config=None) mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert result == [] - def test_with_tool_calls_invalid_json(self, runner, mock_db_session, mocker): + def test_with_tool_calls_invalid_json(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input="invalid", @@ -341,7 +342,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_empty_tool_name_split(self, runner, mock_db_session, mocker): + def test_empty_tool_name_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=";", thought="thinking") msg = mocker.MagicMock(id="m5", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -350,7 +351,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_valid_json_tool_flow(self, runner, mock_db_session, mocker): + def test_valid_json_tool_flow(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=json.dumps({"tool1": {"x": 1}}), @@ -379,7 +380,7 @@ class TestOrganizeHistory: class TestConvertToolToPromptMessageTool: - def test_basic_conversion(self, runner, mocker): + def test_basic_conversion(self, runner, mocker: MockerFixture): 
tool = mocker.MagicMock(tool_name="tool1") runtime_param = mocker.MagicMock() @@ -404,7 +405,7 @@ class TestConvertToolToPromptMessageTool: prompt_tool, entity = runner._convert_tool_to_prompt_message_tool(tool) assert entity == tool_entity - def test_full_conversion_multiple_params(self, runner, mocker): + def test_full_conversion_multiple_params(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") # LLM param with input_schema override @@ -441,7 +442,7 @@ class TestConvertToolToPromptMessageTool: class TestInitPromptToolsExtended: - def test_agent_tool_branch(self, runner, mocker): + def test_agent_tool_branch(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="agent_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) mocker.patch.object(runner, "_convert_tool_to_prompt_message_tool", return_value=(MagicMock(), "entity")) @@ -449,7 +450,7 @@ class TestInitPromptToolsExtended: tools, prompts = runner._init_prompt_tools() assert "agent_tool" in tools - def test_exception_in_conversion(self, runner, mocker): + def test_exception_in_conversion(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="bad_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) mocker.patch.object(runner, "_convert_tool_to_prompt_message_tool", side_effect=Exception) @@ -464,7 +465,7 @@ class TestInitPromptToolsExtended: class TestAdditionalCoverage: - def test_update_prompt_with_input_schema(self, runner, mocker): + def test_update_prompt_with_input_schema(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = mocker.MagicMock() @@ -487,7 +488,7 @@ class TestAdditionalCoverage: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"]["p1"]["type"] == "number" - def test_save_agent_thought_existing_labels(self, runner, mock_db_session, mocker): + def test_save_agent_thought_existing_labels(self, runner, mock_db_session, 
mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {"tool1": {"en_US": "existing"}} @@ -498,7 +499,7 @@ class TestAdditionalCoverage: labels = json.loads(agent.tool_labels_str) assert labels["tool1"]["en_US"] == "existing" - def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker): + def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {} @@ -508,7 +509,7 @@ class TestAdditionalCoverage: runner.save_agent_thought("id", None, None, None, None, "meta_string", None, [], None) assert agent.tool_meta_str == "meta_string" - def test_convert_dataset_retriever_tool(self, runner, mocker): + def test_convert_dataset_retriever_tool(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -525,7 +526,7 @@ class TestAdditionalCoverage: prompt = runner._convert_dataset_retriever_tool_to_prompt_message_tool(ds_tool) assert prompt is not None - def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker): + def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() @@ -544,7 +545,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_user_prompt(msg) assert result is not None - def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker): + def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=None, thought="thinking") msg = mocker.MagicMock(id="m3", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -554,7 +555,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_history([]) assert isinstance(result, 
list) - def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker): + def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1;tool2", tool_input=json.dumps({"tool1": {}, "tool2": {}}), @@ -572,7 +573,7 @@ class TestAdditionalCoverage: # ================= Additional Surgical Coverage ================= - def test_convert_tool_select_enum_branch(self, runner, mocker): + def test_convert_tool_select_enum_branch(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -599,7 +600,7 @@ class TestAdditionalCoverage: class TestConvertDatasetRetrieverTool: - def test_required_param_added(self, runner, mocker): + def test_required_param_added(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -619,7 +620,7 @@ class TestConvertDatasetRetrieverTool: class TestBaseAgentRunnerInit: - def test_init_sets_stream_tool_call_and_files(self, mocker): + def test_init_sets_stream_tool_call_and_files(self, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = 2 mocker.patch.object(module.db, "session", session) @@ -662,7 +663,7 @@ class TestBaseAgentRunnerInit: class TestBaseAgentRunnerCoverage: - def test_convert_tool_skips_non_llm_param(self, runner, mocker): + def test_convert_tool_skips_non_llm_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -680,7 +681,7 @@ class TestBaseAgentRunnerCoverage: assert prompt_tool.parameters["properties"] == {} - def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker): + def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker: MockerFixture): dataset_tool = mocker.MagicMock() dataset_tool.entity.identity.name = "ds" runner.dataset_tools = [dataset_tool] @@ -692,7 +693,7 @@ class 
TestBaseAgentRunnerCoverage: assert tools["ds"] == dataset_tool assert len(prompt_tools) == 1 - def test_update_prompt_message_tool_select_enum(self, runner, mocker): + def test_update_prompt_message_tool_select_enum(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() option1 = mocker.MagicMock(value="A") @@ -716,7 +717,7 @@ class TestBaseAgentRunnerCoverage: assert result.parameters["properties"]["select_param"]["enum"] == ["A", "B"] - def test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker): + def test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {} @@ -754,7 +755,7 @@ class TestBaseAgentRunnerCoverage: assert isinstance(agent.observation, str) assert isinstance(agent.tool_meta_str, str) - def test_save_agent_thought_skips_empty_tool_name(self, runner, mock_db_session, mocker): + def test_save_agent_thought_skips_empty_tool_name(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;;" agent.tool_labels = {} @@ -768,7 +769,7 @@ class TestBaseAgentRunnerCoverage: labels = json.loads(agent.tool_labels_str) assert "" not in labels - def test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker): + def test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) @@ -778,7 +779,7 @@ class TestBaseAgentRunnerCoverage: assert system_message in result - def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker): + def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=None, diff --git 
a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py index cde8820e00..314305d371 100644 --- a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py @@ -2,6 +2,7 @@ import json from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.cot_agent_runner import CotAgentRunner from core.agent.entities import AgentScratchpadUnit @@ -25,7 +26,7 @@ class DummyRunner(CotAgentRunner): @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Prevent BaseAgentRunner __init__ from hitting database mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.organize_agent_history", @@ -165,7 +166,7 @@ class TestHandleInvokeAction: response, meta = runner._handle_invoke_action(action, {}, []) assert "there is not a tool named" in response - def test_tool_with_json_string_args(self, runner, mocker): + def test_tool_with_json_string_args(self, runner, mocker: MockerFixture): action = AgentScratchpadUnit.Action(action_name="tool", action_input=json.dumps({"a": 1})) tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -180,7 +181,7 @@ class TestHandleInvokeAction: class TestOrganizeHistoricPromptMessages: - def test_empty_history(self, runner, mocker): + def test_empty_history(self, runner, mocker: MockerFixture): mocker.patch( "core.agent.cot_agent_runner.AgentHistoryPromptTransform.get_prompt", return_value=[], @@ -190,7 +191,7 @@ class TestOrganizeHistoricPromptMessages: class TestRun: - def test_run_handles_empty_parser_output(self, runner, mocker): + def test_run_handles_empty_parser_output(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -202,7 +203,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert isinstance(results, list) - def test_run_with_action_and_tool_invocation(self, runner, mocker): + def 
test_run_with_action_and_tool_invocation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -223,7 +224,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_respects_max_iteration_boundary(self, runner, mocker): + def test_run_respects_max_iteration_boundary(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 1 message = MagicMock() message.id = "msg-id" @@ -245,7 +246,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_basic_flow(self, runner, mocker): + def test_run_basic_flow(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -257,7 +258,7 @@ class TestRun: results = list(runner.run(message, "query", {"name": "John"})) assert results - def test_run_max_iteration_error(self, runner, mocker): + def test_run_max_iteration_error(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 0 message = MagicMock() message.id = "msg-id" @@ -272,7 +273,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {})) - def test_run_increase_usage_aggregation(self, runner, mocker): + def test_run_increase_usage_aggregation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" runner.app_config.agent.max_iteration = 2 @@ -329,7 +330,7 @@ class TestRun: assert final_usage.completion_price == 2 assert final_usage.total_price == 4 - def test_run_when_no_action_branch(self, runner, mocker): + def test_run_when_no_action_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -341,7 +342,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "" - def test_run_usage_missing_key_branch(self, runner, mocker): + def 
test_run_usage_missing_key_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -354,7 +355,7 @@ class TestRun: list(runner.run(message, "query", {})) - def test_run_prompt_tool_update_branch(self, runner, mocker): + def test_run_prompt_tool_update_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -410,7 +411,7 @@ class TestRun: class TestInitReactState: - def test_init_react_state_resets_state(self, runner, mocker): + def test_init_react_state_resets_state(self, runner, mocker: MockerFixture): mocker.patch.object(runner, "_organize_historic_prompt_messages", return_value=["historic"]) runner._agent_scratchpad = ["old"] runner._query = "old" @@ -423,7 +424,7 @@ class TestInitReactState: class TestHandleInvokeActionExtended: - def test_tool_with_invalid_json_string_args(self, runner, mocker): + def test_tool_with_invalid_json_string_args(self, runner, mocker: MockerFixture): action = AgentScratchpadUnit.Action(action_name="tool", action_input="not-json") tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -457,7 +458,7 @@ class TestFillInputsEdgeCases: class TestOrganizeHistoricPromptMessagesExtended: - def test_user_message_flushes_scratchpad(self, runner, mocker): + def test_user_message_flushes_scratchpad(self, runner, mocker: MockerFixture): from graphon.model_runtime.entities.message_entities import UserPromptMessage user_message = UserPromptMessage(content="Hi") @@ -480,7 +481,7 @@ class TestOrganizeHistoricPromptMessagesExtended: with pytest.raises(NotImplementedError): runner._organize_historic_prompt_messages([]) - def test_agent_history_transform_invocation(self, runner, mocker): + def test_agent_history_transform_invocation(self, runner, mocker: MockerFixture): mock_transform = MagicMock() mock_transform.get_prompt.return_value = [] @@ -495,7 +496,7 @@ class TestOrganizeHistoricPromptMessagesExtended: class TestRunAdditionalBranches: - def 
test_run_with_no_action_final_answer_empty(self, runner, mocker): + def test_run_with_no_action_final_answer_empty(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -507,7 +508,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert any(hasattr(r, "delta") for r in results) - def test_run_with_final_answer_action_string(self, runner, mocker): + def test_run_with_final_answer_action_string(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -521,7 +522,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "done" - def test_run_with_final_answer_action_dict(self, runner, mocker): + def test_run_with_final_answer_action_dict(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -535,7 +536,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert json.loads(results[-1].delta.message.content) == {"a": 1} - def test_run_with_string_final_answer(self, runner, mocker): + def test_run_with_string_final_answer(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" diff --git a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py index ea8cc8aa86..8e7093fd12 100644 --- a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from pytest_mock import MockerFixture from core.agent.cot_chat_agent_runner import CotChatAgentRunner from graphon.model_runtime.entities.message_entities import TextPromptMessageContent @@ -55,7 +56,7 @@ def runner(): class TestOrganizeSystemPrompt: - def test_organize_system_prompt_success(self, runner, mocker): + def test_organize_system_prompt_success(self, 
runner, mocker: MockerFixture): first_prompt = "Instruction: {{instruction}}, Tools: {{tools}}, Names: {{tool_names}}" runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt(first_prompt))) @@ -154,7 +155,7 @@ class TestOrganizeUserQuery: class TestOrganizePromptMessages: - def test_no_scratchpad(self, runner, mocker): + def test_no_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -164,7 +165,7 @@ class TestOrganizePromptMessages: assert "query" in result runner._organize_historic_prompt_messages.assert_called_once() - def test_with_final_scratchpad(self, runner, mocker): + def test_with_final_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -177,7 +178,7 @@ class TestOrganizePromptMessages: combined = "".join([m.content for m in assistant_msgs if isinstance(m.content, str)]) assert "Final Answer: done" in combined - def test_with_thought_action_observation(self, runner, mocker): + def test_with_thought_action_observation(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -197,7 +198,7 @@ class TestOrganizePromptMessages: assert "Action: action" in combined assert "Observation: observe" in combined - def test_multiple_units_mixed(self, runner, mocker): + def test_multiple_units_mixed(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) 
runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) diff --git a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py index 2f5873d865..0d949c357d 100644 --- a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import MockerFixture from core.agent.cot_completion_agent_runner import CotCompletionAgentRunner from graphon.model_runtime.entities.message_entities import ( @@ -74,7 +75,7 @@ class TestOrganizeInstructionPrompt: class TestOrganizeHistoricPrompt: - def test_with_user_and_assistant_string(self, runner, mocker): + def test_with_user_and_assistant_string(self, runner, mocker: MockerFixture): user_msg = UserPromptMessage(content="Hello") assistant_msg = AssistantPromptMessage(content="Hi there") @@ -89,7 +90,7 @@ class TestOrganizeHistoricPrompt: assert "Question: Hello" in result assert "Hi there" in result - def test_assistant_list_with_text_content(self, runner, mocker): + def test_assistant_list_with_text_content(self, runner, mocker: MockerFixture): text_content = TextPromptMessageContent(data="Partial answer") assistant_msg = AssistantPromptMessage(content=[text_content]) @@ -103,7 +104,7 @@ class TestOrganizeHistoricPrompt: assert "Partial answer" in result - def test_assistant_list_with_non_text_content_ignored(self, runner, mocker): + def test_assistant_list_with_non_text_content_ignored(self, runner, mocker: MockerFixture): non_text_content = ImagePromptMessageContent(format="url", mime_type="image/png") assistant_msg = AssistantPromptMessage(content=[non_text_content]) @@ -116,7 +117,7 @@ class TestOrganizeHistoricPrompt: result = runner._organize_historic_prompt() assert result == "" - def test_empty_history(self, runner, mocker): + def 
test_empty_history(self, runner, mocker: MockerFixture): mocker.patch.object( runner, "_organize_historic_prompt_messages", @@ -136,7 +137,7 @@ class TestOrganizePromptMessages: def test_full_flow_with_scratchpad( self, runner, - mocker, + mocker: MockerFixture, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory, @@ -171,7 +172,12 @@ class TestOrganizePromptMessages: assert "Question: What is Python?" in content def test_no_scratchpad( - self, runner, mocker, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory + self, + runner, + mocker: MockerFixture, + dummy_app_config_factory, + dummy_agent_config_factory, + dummy_prompt_entity_factory, ): template = "SYS {{historic_messages}} {{agent_scratchpad}} {{query}}" @@ -198,7 +204,7 @@ class TestOrganizePromptMessages: def test_partial_scratchpad_units( self, runner, - mocker, + mocker: MockerFixture, thought, action, observation, diff --git a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py index 17ab5babcb..3a4347e723 100644 --- a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py @@ -3,6 +3,7 @@ from typing import Any from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.errors import AgentMaxIterationError from core.agent.fc_agent_runner import FunctionCallAgentRunner @@ -68,7 +69,7 @@ class DummyResult: @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Completely bypass BaseAgentRunner __init__ to avoid DB / Flask context mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.__init__", @@ -230,7 +231,7 @@ class TestOrganizeUserQuery: result = runner._organize_user_query(None, []) assert len(result) == 1 - def test_with_files_uses_image_detail_config(self, runner, mocker): + def test_with_files_uses_image_detail_config(self, runner, mocker: 
MockerFixture): file_content = TextPromptMessageContent(data="file-content") mock_to_prompt = mocker.patch( "core.agent.fc_agent_runner.file_manager.to_prompt_message_content", @@ -352,7 +353,7 @@ class TestRunMethod: assert len(outputs) == 1 assert runner.save_agent_thought.call_args.kwargs["thought"] == "hi" - def test_run_streaming_tool_call_inputs_type_error(self, runner, mocker): + def test_run_streaming_tool_call_inputs_type_error(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") runner.stream_tool_call = True @@ -398,7 +399,7 @@ class TestRunMethod: outputs = list(runner.run(message, "query")) assert len(outputs) >= 1 - def test_run_with_tool_instance_and_files(self, runner, mocker): + def test_run_with_tool_instance_and_files(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") tool_call = MagicMock() diff --git a/api/tests/unit_tests/core/agent/test_plugin_entities.py b/api/tests/unit_tests/core/agent/test_plugin_entities.py index 9955190aca..aa3098a2a1 100644 --- a/api/tests/unit_tests/core/agent/test_plugin_entities.py +++ b/api/tests/unit_tests/core/agent/test_plugin_entities.py @@ -9,6 +9,7 @@ mocking; ensure entity invariants and validation rules remain stable. 
import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.agent.plugin_entities import ( AgentFeature, @@ -28,12 +29,12 @@ from core.tools.entities.tool_entities import ToolIdentity, ToolProviderIdentity @pytest.fixture -def mock_identity(mocker): +def mock_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyIdentity) @pytest.fixture -def mock_provider_identity(mocker): +def mock_provider_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyProviderIdentity) @@ -47,7 +48,7 @@ class TestAgentStrategyParameterType: "enum_member", list(AgentStrategyParameter.AgentStrategyParameterType), ) - def test_as_normal_type_calls_external_function(self, mocker, enum_member) -> None: + def test_as_normal_type_calls_external_function(self, mocker: MockerFixture, enum_member) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.as_normal_type", return_value="normalized", @@ -58,7 +59,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member) assert result == "normalized" - def test_as_normal_type_propagates_exception(self, mocker) -> None: + def test_as_normal_type_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.as_normal_type", @@ -79,7 +80,7 @@ class TestAgentStrategyParameterType: (AgentStrategyParameter.AgentStrategyParameterType.FILES, []), ], ) - def test_cast_value_calls_external_function(self, mocker, enum_member, value) -> None: + def test_cast_value_calls_external_function(self, mocker: MockerFixture, enum_member, value) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.cast_parameter_value", return_value="casted", @@ -90,7 +91,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member, value) assert result == "casted" - def test_cast_value_propagates_exception(self, mocker) -> 
None: + def test_cast_value_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.cast_parameter_value", @@ -136,7 +137,7 @@ class TestAgentStrategyParameter: assert any(error["loc"] == ("type",) for error in exc_info.value.errors()) - def test_init_frontend_parameter_calls_external(self, mocker) -> None: + def test_init_frontend_parameter_calls_external(self, mocker: MockerFixture) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", return_value="frontend", @@ -153,7 +154,7 @@ class TestAgentStrategyParameter: mock_func.assert_called_once_with(param, param.type, "value") assert result == "frontend" - def test_init_frontend_parameter_propagates_exception(self, mocker) -> None: + def test_init_frontend_parameter_propagates_exception(self, mocker: MockerFixture) -> None: mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", side_effect=RuntimeError("error"), diff --git a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py index 1c5b6ed944..6dbf301f65 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py @@ -10,7 +10,7 @@ class TestGetParametersFromFeatureDict: """Test suite for get_parameters_from_feature_dict""" @pytest.fixture - def mock_config(self, monkeypatch): + def mock_config(self, monkeypatch: pytest.MonkeyPatch): """Mock dify_config values""" mock = MagicMock() mock.UPLOAD_IMAGE_FILE_SIZE_LIMIT = 1 @@ -23,7 +23,7 @@ class TestGetParametersFromFeatureDict: return mock @pytest.fixture - def mock_default_file_limits(self, monkeypatch): + def mock_default_file_limits(self, monkeypatch: pytest.MonkeyPatch): """Mock DEFAULT_FILE_NUMBER_LIMITS constant""" 
monkeypatch.setattr(parameters_mapping, "DEFAULT_FILE_NUMBER_LIMITS", 99) return 99 diff --git a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py index 013ed0cbc4..bd4ca5ff85 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.common.sensitive_word_avoidance.manager import ( SensitiveWordAvoidanceConfigManager, @@ -26,7 +27,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result is None - def test_convert_returns_entity_when_enabled(self, mocker): + def test_convert_returns_entity_when_enabled(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() mocker.patch( @@ -48,7 +49,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result == mock_entity - def test_convert_enabled_without_type_or_config(self, mocker): + def test_convert_enabled_without_type_or_config(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() patched = mocker.patch( @@ -135,7 +136,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: with pytest.raises(ValueError, match="must be a dict"): SensitiveWordAvoidanceConfigManager.validate_and_set_defaults(tenant_id="tenant1", config=config) - def test_validate_calls_moderation_factory(self, mocker): + def test_validate_calls_moderation_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -159,7 +160,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: assert result_config["sensitive_word_avoidance"]["enabled"] is True 
assert fields == ["sensitive_word_avoidance"] - def test_validate_sets_empty_dict_when_config_none(self, mocker): + def test_validate_sets_empty_dict_when_config_none(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -179,7 +180,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: # Assert mock_validate.assert_called_once_with(name="mock_type", tenant_id="tenant1", config={}) - def test_validate_only_structure_validate_skips_factory(self, mocker): + def test_validate_only_structure_validate_skips_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py index 992b580376..359b04070b 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.agent.manager import AgentConfigManager @@ -84,7 +85,7 @@ class TestAgentConfigManagerConvert: assert result.strategy.name == "CHAIN_OF_THOUGHT" - def test_convert_skips_disabled_tools(self, mocker, base_config): + def test_convert_skips_disabled_tools(self, mocker: MockerFixture, base_config): # Patch AgentEntity to bypass pydantic validation mock_agent_entity = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentEntity", @@ -128,7 +129,7 @@ class TestAgentConfigManagerConvert: mock_validate.assert_called_once() mock_agent_entity.assert_called_once() - def test_convert_tool_requires_minimum_keys(self, mocker, 
base_config): + def test_convert_tool_requires_minimum_keys(self, mocker: MockerFixture, base_config): mock_validate = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentToolEntity.model_validate", return_value=MagicMock(), diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py index a688e2a5c5..3a239eac0e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py @@ -2,6 +2,7 @@ import uuid from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.dataset.manager import DatasetConfigManager from core.entities.agent_entities import PlanningStrategy @@ -69,7 +70,7 @@ class TestDatasetConfigManagerConvert: assert result.dataset_ids == [valid_uuid] assert result.retrieve_config.query_variable == "query" - def test_convert_single_with_metadata_configs(self, valid_uuid, mocker): + def test_convert_single_with_metadata_configs(self, valid_uuid, mocker: MockerFixture): mock_retrieve_config = MagicMock() mock_entity = MagicMock() mock_entity.dataset_ids = [valid_uuid] @@ -258,7 +259,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_invalid_uuid(self, mocker): + def test_extract_invalid_uuid(self, mocker: MockerFixture): invalid_uuid = "not-a-uuid" config = { "agent_mode": { @@ -270,7 +271,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_dataset_not_exists(self, valid_uuid, mocker): + def test_extract_dataset_not_exists(self, valid_uuid, mocker: 
MockerFixture): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, @@ -292,7 +293,7 @@ class TestExtractDatasetConfig: class TestIsDatasetExists: - def test_dataset_exists_true(self, mocker, valid_uuid): + def test_dataset_exists_true(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "tenant1" mocker.patch( @@ -302,14 +303,14 @@ class TestIsDatasetExists: assert DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_not_found(self, mocker, valid_uuid): + def test_dataset_exists_false_when_not_found(self, mocker: MockerFixture, valid_uuid): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, ) assert not DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_tenant_mismatch(self, mocker, valid_uuid): + def test_dataset_exists_false_when_tenant_mismatch(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "other" mocker.patch( diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py index 186b4a501d..e5b581b6a0 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter from core.entities.model_entities import ModelStatus @@ -16,7 +17,7 @@ from graphon.model_runtime.entities.model_entities import ModelPropertyKey class TestModelConfigConverter: 
@pytest.fixture(autouse=True) - def patch_response_entity(self, mocker): + def patch_response_entity(self, mocker: MockerFixture): """ Patch ModelConfigWithCredentialsEntity to bypass Pydantic validation and return a simple namespace object instead. @@ -69,7 +70,7 @@ class TestModelConfigConverter: return bundle @pytest.fixture - def patch_provider_manager(self, mocker, mock_provider_bundle): + def patch_provider_manager(self, mocker: MockerFixture, mock_provider_bundle): mock_manager = MagicMock() mock_manager.get_provider_model_bundle.return_value = mock_provider_bundle mocker.patch( @@ -99,7 +100,7 @@ class TestModelConfigConverter: assert result.parameters == {"temperature": 0.7} assert result.stop == ["\n"] - def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_app_config.model.mode = None mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { @@ -116,7 +117,9 @@ class TestModelConfigConverter: result = ModelConfigConverter.convert(mock_app_config) assert result.mode == LLMMode.COMPLETION - def test_convert_mode_from_schema_invalid_fallback(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_invalid_fallback( + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture + ): mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { ModelPropertyKey.MODE: "invalid" } @@ -135,7 +138,7 @@ class TestModelConfigConverter: # Credential Errors # ============================= - def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_current_credentials.return_value = None mock_manager = MagicMock() @@ -152,7 
+155,7 @@ class TestModelConfigConverter: # Provider Model Errors # ============================= - def test_convert_provider_model_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_provider_model_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_provider_model.return_value = None mock_manager = MagicMock() @@ -174,7 +177,7 @@ class TestModelConfigConverter: ], ) def test_convert_provider_model_status_errors( - self, mock_app_config, mock_provider_bundle, mocker, status, expected_exception + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture, status, expected_exception ): mock_provider = MagicMock() mock_provider.status = status @@ -194,7 +197,7 @@ class TestModelConfigConverter: # Schema Errors # ============================= - def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.model_type_instance.get_model_schema.return_value = None mock_manager = MagicMock() diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py index 68bca485bb..72e334004e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture # Target from core.app.app_config.easy_ui_based_app.model_config.manager import ModelConfigManager @@ -107,7 +108,9 @@ class TestModelConfigManager: # validate_and_set_defaults # ========================================================== - def test_validate_and_set_defaults_success(self, mocker, 
valid_config, provider_entities, valid_model_list): + def test_validate_and_set_defaults_success( + self, mocker: MockerFixture, valid_config, provider_entities, valid_model_list + ): self._patch_model_assembly( mocker, provider_entities=provider_entities, @@ -127,35 +130,37 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="object type"): ModelConfigManager.validate_and_set_defaults("tenant1", {"model": "invalid"}) - def test_validate_and_set_defaults_missing_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_invalid_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "invalid/provider", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_missing_name(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_name(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.name is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_empty_models(self, mocker, provider_entities): + def 
test_validate_and_set_defaults_empty_models(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_model_name(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_invalid_model_name( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "invalid", "completion_params": {}}} self._patch_model_assembly( mocker, @@ -166,7 +171,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_default_mode_when_missing(self, mocker, provider_entities): + def test_validate_and_set_defaults_default_mode_when_missing(self, mocker: MockerFixture, provider_entities): model = MagicMock() model.model = "gpt-4" model.model_properties = {} @@ -178,7 +183,9 @@ class TestModelConfigManager: assert updated_config["model"]["mode"] == "completion" - def test_validate_and_set_defaults_missing_completion_params(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_missing_completion_params( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "gpt-4"}} self._patch_model_assembly( mocker, @@ -189,7 +196,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="completion_params is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_provider_without_slash_converted(self, mocker, valid_model_list): + def 
test_validate_and_set_defaults_provider_without_slash_converted(self, mocker: MockerFixture, valid_model_list): """ Covers branch where provider does not contain '/' and ModelProviderID conversion is triggered (line 64). diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py index fd49072cd5..3fd21ab22b 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.prompt_template.manager import ( PromptTemplateConfigManager, @@ -38,7 +39,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError, match="prompt_type is required"): PromptTemplateConfigManager.convert({}) - def test_convert_simple_prompt(self, mocker): + def test_convert_simple_prompt(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -56,7 +57,7 @@ class TestPromptTemplateConfigManagerConvert: assert result == "simple_entity" mock_prompt_entity_cls.assert_called_once_with(prompt_type="simple", simple_prompt_template="hello") - def test_convert_advanced_chat_valid(self, mocker): + def test_convert_advanced_chat_valid(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -97,7 +98,7 @@ class TestPromptTemplateConfigManagerConvert: {"text": "hi", "role": 123}, ], ) - def test_convert_advanced_invalid_message_fields(self, mocker, message): + def test_convert_advanced_invalid_message_fields(self, mocker: MockerFixture, message): mock_prompt_entity_cls = MagicMock() 
mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -114,7 +115,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError): PromptTemplateConfigManager.convert(config) - def test_convert_advanced_completion_with_roles(self, mocker): + def test_convert_advanced_completion_with_roles(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -154,7 +155,7 @@ class TestValidateAndSetDefaults: def setup_method(self): self.valid_model = {"mode": "chat"} - def _patch_prompt_type(self, mocker): + def _patch_prompt_type(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mocker.patch( @@ -163,7 +164,7 @@ class TestValidateAndSetDefaults: ) return mock_prompt_entity_cls - def test_default_prompt_type_set(self, mocker): + def test_default_prompt_type_set(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = {"model": self.valid_model} @@ -173,7 +174,7 @@ class TestValidateAndSetDefaults: assert result["prompt_type"] == "simple" assert isinstance(keys, list) - def test_invalid_prompt_type_raises(self, mocker): + def test_invalid_prompt_type_raises(self, mocker: MockerFixture): class InvalidEnum(DummyPromptType): def __iter__(self): return iter([DummyEnumValue("valid")]) @@ -191,7 +192,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_invalid_chat_prompt_config_type(self, mocker): + def test_invalid_chat_prompt_config_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -203,7 +204,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_simple_mode_invalid_pre_prompt_type(self, mocker): + def 
test_simple_mode_invalid_pre_prompt_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -215,7 +216,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_requires_one_config(self, mocker): + def test_advanced_requires_one_config(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -228,7 +229,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_invalid_model_mode(self, mocker): + def test_advanced_invalid_model_mode(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -240,7 +241,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_chat_prompt_length_exceeds(self, mocker): + def test_advanced_chat_prompt_length_exceeds(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -252,7 +253,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_completion_prefix_defaults_set_when_empty(self, mocker): + def test_completion_prefix_defaults_set_when_empty(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py index d9fe7004ff..b82417cfed 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.variables.manager 
import ( BasicVariablesConfigManager, @@ -15,7 +16,7 @@ class TestBasicVariablesConfigManagerConvert: assert variables == [] assert external == [] - def test_convert_external_data_tools_enabled_and_disabled(self, mocker): + def test_convert_external_data_tools_enabled_and_disabled(self, mocker: MockerFixture): config = { "external_data_tools": [ {"enabled": False}, @@ -232,7 +233,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_disabled_tool_skipped(self, mocker): + def test_validate_disabled_tool_skipped(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": False}]} spy = mocker.patch( @@ -250,7 +251,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_enabled_tool_calls_factory(self, mocker): + def test_validate_enabled_tool_calls_factory(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": True, "type": "tool", "config": {"a": 1}}]} spy = mocker.patch( @@ -263,7 +264,7 @@ class TestValidateExternalDataToolsAndSetDefaults: class TestValidateAndSetDefaultsIntegration: - def test_validate_and_set_defaults_calls_both(self, mocker): + def test_validate_and_set_defaults_calls_both(self, mocker: MockerFixture): config = {} spy_var = mocker.patch.object( diff --git a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py index e99852cf76..e2ab3e2192 100644 --- a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py @@ -2,6 +2,7 @@ from collections import UserDict from unittest.mock import MagicMock import pytest +from pytest_mock import 
MockerFixture from core.app.app_config.base_app_config_manager import BaseAppConfigManager @@ -12,7 +13,7 @@ class TestBaseAppConfigManager: return {"key": "value", "another": 123} @pytest.fixture - def mock_app_additional_features(self, mocker): + def mock_app_additional_features(self, mocker: MockerFixture): mock_instance = MagicMock() mocker.patch( "core.app.app_config.base_app_config_manager.AppAdditionalFeatures", @@ -21,7 +22,7 @@ class TestBaseAppConfigManager: return mock_instance @pytest.fixture - def mock_managers(self, mocker): + def mock_managers(self, mocker: MockerFixture): retrieval = mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", return_value="retrieval_result", @@ -72,7 +73,7 @@ class TestBaseAppConfigManager: ) def test_convert_features_all_modes( self, - mocker, + mocker: MockerFixture, mock_config_dict, mock_app_additional_features, mock_managers, @@ -107,7 +108,7 @@ class TestBaseAppConfigManager: mock_managers["speech_to_text"].assert_called_once_with(config=dict(mock_config_dict.items())) mock_managers["text_to_speech"].assert_called_once_with(config=dict(mock_config_dict.items())) - def test_convert_features_empty_config(self, mocker, mock_app_additional_features, mock_managers): + def test_convert_features_empty_config(self, mocker: MockerFixture, mock_app_additional_features, mock_managers): # Arrange empty_config = {} mock_app_mode = MagicMock() @@ -143,7 +144,7 @@ class TestBaseAppConfigManager: with pytest.raises((TypeError, AttributeError)): BaseAppConfigManager.convert_features(invalid_config, "CHAT") - def test_convert_features_manager_exception_propagates(self, mocker, mock_config_dict): + def test_convert_features_manager_exception_propagates(self, mocker: MockerFixture, mock_config_dict): # Arrange mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", @@ -154,7 +155,9 @@ class TestBaseAppConfigManager: with 
pytest.raises(RuntimeError): BaseAppConfigManager.convert_features(mock_config_dict, "CHAT") - def test_convert_features_mapping_subclass(self, mocker, mock_app_additional_features, mock_managers): + def test_convert_features_mapping_subclass( + self, mocker: MockerFixture, mock_app_additional_features, mock_managers + ): # Arrange class CustomMapping(UserDict): pass diff --git a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py index fa128aca87..dacd69a578 100644 --- a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py +++ b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.workflow_ui_based_app.variables.manager import ( WorkflowVariablesConfigManager, @@ -10,19 +11,19 @@ from core.app.app_config.workflow_ui_based_app.variables.manager import ( @pytest.fixture -def mock_workflow(mocker): +def mock_workflow(mocker: MockerFixture): workflow = mocker.MagicMock() workflow.graph_dict = {"nodes": []} return workflow @pytest.fixture -def mock_variable_entity(mocker): +def mock_variable_entity(mocker: MockerFixture): return mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.VariableEntity") @pytest.fixture -def mock_rag_entity(mocker): +def mock_rag_entity(mocker: MockerFixture): return mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.RagPipelineVariableEntity") diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py index af5d203f12..bc3b06cd1b 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py +++ 
b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py @@ -111,7 +111,7 @@ class TestAdvancedChatAppGeneratorInternals: workflow_id="workflow-id", ) - def test_generate_loads_conversation_and_files(self, monkeypatch): + def test_generate_loads_conversation_and_files(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() @@ -195,7 +195,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["application_generate_entity"].files == built_files assert build_files_called["called"] is True - def test_resume_delegates_to_generate(self, monkeypatch): + def test_resume_delegates_to_generate(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() application_generate_entity = AdvancedChatAppGenerateEntity.model_construct( task_id="task", @@ -235,7 +235,7 @@ class TestAdvancedChatAppGeneratorInternals: assert result == {"resumed": True} assert captured["graph_runtime_state"] is not None - def test_single_iteration_generate_builds_debug_task(self, monkeypatch): + def test_single_iteration_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -293,7 +293,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert captured["application_generate_entity"].single_iteration_run.node_id == "node-1" - def test_single_loop_generate_builds_debug_task(self, monkeypatch): + def test_single_loop_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -351,7 +351,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert captured["application_generate_entity"].single_loop_run.node_id == "node-2" - def 
test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch): + def test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 0 app_config = self._build_app_config() @@ -449,7 +449,7 @@ class TestAdvancedChatAppGeneratorInternals: assert isinstance(captured["conversation"], ConversationSnapshot) assert isinstance(captured["message"], MessageSnapshot) - def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch): + def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 0 app_config = self._build_app_config() @@ -535,7 +535,7 @@ class TestAdvancedChatAppGeneratorInternals: db_session.refresh.assert_not_called() db_session.close.assert_called_once() - def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch): + def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -594,7 +594,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch): + def test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -658,7 +658,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_handles_stopped_error(self, monkeypatch): + def test_generate_worker_handles_stopped_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -732,7 +732,7 @@ 
class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_not_called() - def test_generate_worker_handles_validation_error(self, monkeypatch): + def test_generate_worker_handles_validation_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -816,7 +816,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch): + def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch: pytest.MonkeyPatch): app_config = self._build_app_config() @contextmanager @@ -897,7 +897,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -953,7 +953,7 @@ class TestAdvancedChatAppGeneratorInternals: stream=False, ) - def test_handle_response_re_raises_value_error(self, monkeypatch): + def test_handle_response_re_raises_value_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -1002,7 +1002,7 @@ class TestAdvancedChatAppGeneratorInternals: logger_exception.assert_called_once() - def test_generate_worker_handles_invoke_auth_error(self, monkeypatch): + def test_generate_worker_handles_invoke_auth_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -1088,7 +1088,7 @@ class TestAdvancedChatAppGeneratorInternals: assert queue_manager.publish_error.called - def test_generate_debugger_enables_retrieve_source(self, monkeypatch): + def test_generate_debugger_enables_retrieve_source(self, 
monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -1167,7 +1167,7 @@ class TestAdvancedChatAppGeneratorInternals: assert app_config.additional_features.show_retrieve_source is True assert captured["application_generate_entity"].query == "hello" - def test_generate_service_api_sets_parent_message_id(self, monkeypatch): + def test_generate_service_api_sets_parent_message_id(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py index 64bcfa9a18..e5cb8a3383 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py @@ -51,7 +51,7 @@ from core.base.tts.app_generator_tts_publisher import AudioTrunk from core.workflow.system_variables import build_system_variables from graphon.entities.pause_reason import PauseReasonType from graphon.enums import BuiltinNodeTypes -from graphon.nodes.human_input.entities import UserAction +from graphon.nodes.human_input.entities import UserActionConfig from graphon.runtime import GraphRuntimeState, VariablePool from libs.datetime_utils import naive_utc_now from models.enums import MessageStatus @@ -148,7 +148,7 @@ class TestAdvancedChatGenerateTaskPipeline: node_title="Approval", form_content="Need approval", inputs=[], - actions=[UserAction(id="approve", title="Approve")], + actions=[UserActionConfig(id="approve", title="Approve")], display_in_ui=True, form_token="token-1", resolved_default_values={}, @@ -224,7 +224,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def 
test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -368,7 +368,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert list(pipeline._handle_loop_next_event(loop_next)) == ["loop_next"] assert list(pipeline._handle_loop_completed_event(loop_done)) == ["loop_done"] - def test_workflow_finish_handlers(self, monkeypatch): + def test_workflow_finish_handlers(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_run_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( @@ -593,7 +593,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert message.answer == "hello" assert message.message_metadata - def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch): + def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._message_end_to_stream_response = lambda: "end" saved: list[str] = [] @@ -614,7 +614,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert responses == ["end"] assert saved == ["saved"] - def test_handle_message_end_event_applies_output_moderation(self, monkeypatch): + def test_handle_message_end_event_applies_output_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py index a871e8d93b..d47b70e950 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py +++ 
b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py @@ -2,6 +2,7 @@ import uuid from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.agent_chat.app_config_manager import ( @@ -11,7 +12,7 @@ from core.entities.agent_entities import PlanningStrategy class TestAgentChatAppConfigManagerGetAppConfig: - def test_get_app_config_override_config(self, mocker): + def test_get_app_config_override_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"ignored": True} @@ -45,7 +46,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.variables == "variables" assert result.external_data_variables == "external" - def test_get_app_config_conversation_specific(self, mocker): + def test_get_app_config_conversation_specific(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -76,7 +77,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.app_model_config_dict == app_model_config.to_dict.return_value assert result.app_model_config_from.value == "conversation-specific-config" - def test_get_app_config_latest_config(self, mocker): + def test_get_app_config_latest_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -107,7 +108,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: class TestAgentChatAppConfigManagerConfigValidate: - def test_config_validate_filters_related_keys(self, mocker): + def 
test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {}, "user_input_form": {}, @@ -247,7 +248,7 @@ class TestValidateAgentModeAndSetDefaults: {"agent_mode": {"enabled": True, "tools": [{"dataset": {"enabled": True, "id": "bad"}}]}}, ) - def test_old_tool_dataset_id_not_exists(self, mocker): + def test_old_tool_dataset_id_not_exists(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", return_value=False, @@ -275,7 +276,7 @@ class TestValidateAgentModeAndSetDefaults: "tenant", {"agent_mode": {"enabled": True, "tools": [tool]}} ) - def test_valid_old_and_new_style_tools(self, mocker): + def test_valid_old_and_new_style_tools(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", return_value=True, diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py index 80f7f94b1a..6cd62c933a 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py @@ -2,6 +2,7 @@ import contextlib import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.app.apps.agent_chat.app_generator import AgentChatAppGenerator from core.app.apps.exc import GenerateTaskStoppedError @@ -16,7 +17,7 @@ class DummyAccount: @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = AgentChatAppGenerator() mocker.patch( "core.app.apps.agent_chat.app_generator.current_app", @@ -27,19 +28,19 @@ def generator(mocker): class TestAgentChatAppGeneratorGenerate: - def test_generate_rejects_blocking_mode(self, generator, mocker): + def test_generate_rejects_blocking_mode(self, generator, mocker: MockerFixture): app_model = 
mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={}, invoke_from=mocker.MagicMock(), streaming=False) - def test_generate_requires_query(self, generator, mocker): + def test_generate_requires_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={"inputs": {}}, invoke_from=mocker.MagicMock()) - def test_generate_rejects_non_string_query(self, generator, mocker): + def test_generate_rejects_non_string_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): @@ -50,7 +51,7 @@ class TestAgentChatAppGeneratorGenerate: invoke_from=mocker.MagicMock(), ) - def test_generate_override_requires_debugger(self, generator, mocker): + def test_generate_override_requires_debugger(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") @@ -62,7 +63,7 @@ class TestAgentChatAppGeneratorGenerate: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_success_with_debugger_override(self, generator, mocker): + def test_generate_success_with_debugger_override(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -142,7 +143,7 @@ class TestAgentChatAppGeneratorGenerate: assert result == {"result": "ok"} thread_obj.start.assert_called_once() - def test_generate_without_file_config(self, generator, mocker): + def test_generate_without_file_config(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": 
{"provider": "p"}} @@ -213,14 +214,14 @@ class TestAgentChatAppGeneratorGenerate: class TestAgentChatAppGeneratorWorker: @pytest.fixture(autouse=True) - def patch_context(self, mocker): + def patch_context(self, mocker: MockerFixture): @contextlib.contextmanager def ctx_manager(*args, **kwargs): yield mocker.patch("core.app.apps.agent_chat.app_generator.preserve_flask_contexts", ctx_manager) - def test_generate_worker_handles_generate_task_stopped(self, generator, mocker): + def test_generate_worker_handles_generate_task_stopped(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -250,7 +251,7 @@ class TestAgentChatAppGeneratorWorker: Exception("bad"), ], ) - def test_generate_worker_publishes_errors(self, generator, mocker, error): + def test_generate_worker_publishes_errors(self, generator, mocker: MockerFixture, error): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -271,7 +272,7 @@ class TestAgentChatAppGeneratorWorker: assert queue_manager.publish_error.called - def test_generate_worker_logs_value_error_when_debug(self, generator, mocker): + def test_generate_worker_logs_value_error_when_debug(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py index 4567b35480..0260235b03 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py +++ 
b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.agent.entities import AgentEntity from core.app.apps.agent_chat.app_runner import AgentChatAppRunner @@ -13,7 +14,7 @@ def runner(): class TestAgentChatAppRunnerRun: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, runner, mocker: MockerFixture): app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", agent=mocker.MagicMock()) generate_entity = mocker.MagicMock(app_config=app_config, inputs={}, query="q", files=[], stream=True) @@ -22,7 +23,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_moderation_error_direct_output(self, runner, mocker): + def test_run_moderation_error_direct_output(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -45,7 +46,7 @@ class TestAgentChatAppRunnerRun: runner.direct_output.assert_called_once() - def test_run_annotation_reply_short_circuits(self, runner, mocker): + def test_run_annotation_reply_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -74,7 +75,7 @@ class TestAgentChatAppRunnerRun: queue_manager.publish.assert_called_once() runner.direct_output.assert_called_once() - def test_run_hosting_moderation_short_circuits(self, runner, mocker): + def test_run_hosting_moderation_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = 
mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -98,7 +99,7 @@ class TestAgentChatAppRunnerRun: runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_model_schema_missing(self, runner, mocker): + def test_run_model_schema_missing(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -140,7 +141,7 @@ class TestAgentChatAppRunnerRun: (LLMMode.COMPLETION, "CotCompletionAgentRunner"), ], ) - def test_run_chain_of_thought_modes(self, runner, mocker, mode, expected_runner): + def test_run_chain_of_thought_modes(self, runner, mocker: MockerFixture, mode, expected_runner): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -196,7 +197,7 @@ class TestAgentChatAppRunnerRun: runner_instance.run.assert_called_once() runner._handle_invoke_result.assert_called_once() - def test_run_invalid_llm_mode_raises(self, runner, mocker): + def test_run_invalid_llm_mode_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -242,7 +243,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), conversation, message) - def test_run_function_calling_strategy_selected_by_features(self, 
runner, mocker): + def test_run_function_calling_strategy_selected_by_features(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -298,7 +299,7 @@ class TestAgentChatAppRunnerRun: assert app_config.agent.strategy == AgentEntity.Strategy.FUNCTION_CALLING runner_instance.run.assert_called_once() - def test_run_conversation_not_found(self, runner, mocker): + def test_run_conversation_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -332,7 +333,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) - def test_run_message_not_found(self, runner, mocker): + def test_run_message_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -366,7 +367,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) - def test_run_invalid_agent_strategy_raises(self, runner, mocker): + def test_run_invalid_agent_strategy_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", 
tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock(strategy="invalid", provider="p", model="m") diff --git a/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_human_input.py b/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_human_input.py index 1bef6f69cd..9df351fb7a 100644 --- a/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_human_input.py +++ b/api/tests/unit_tests/core/app/apps/common/test_workflow_response_converter_human_input.py @@ -7,6 +7,7 @@ from core.app.entities.queue_entities import QueueHumanInputFormFilledEvent, Que from core.workflow.system_variables import build_system_variables from graphon.entities import WorkflowStartReason from graphon.runtime import GraphRuntimeState, VariablePool +from graphon.variables.segments import StringSegment def _build_converter(): @@ -63,6 +64,37 @@ def test_human_input_form_filled_stream_response_contains_rendered_content(): assert resp.data.action_id == "Approve" +def test_human_input_form_filled_stream_response_serializes_submitted_data(): + converter = _build_converter() + converter.workflow_start_to_stream_response( + task_id="task-1", + workflow_run_id="run-1", + workflow_id="wf-1", + reason=WorkflowStartReason.INITIAL, + ) + + queue_event = QueueHumanInputFormFilledEvent( + node_execution_id="exec-1", + node_id="node-1", + node_type="human-input", + node_title="Human Input", + rendered_content="# Title\nvalue", + action_id="Approve", + action_text="Approve", + submitted_data={ + "decision": StringSegment(value="approve"), + "comment": StringSegment(value="looks good"), + }, + ) + + resp = converter.human_input_form_filled_to_stream_response(event=queue_event, task_id="task-1") + + assert resp.data.submitted_data == { + "decision": "approve", + "comment": "looks good", + } + + def test_human_input_form_timeout_stream_response_contains_timeout_metadata(): converter = _build_converter() 
converter.workflow_start_to_stream_response( diff --git a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py index aa2085177e..8dcf6e9193 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.completion.app_runner as module from core.app.apps.completion.app_runner import CompletionAppRunner @@ -47,7 +48,7 @@ def _build_generate_entity(app_config, file_upload_config=None): class TestCompletionAppRunner: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, runner, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -58,7 +59,7 @@ class TestCompletionAppRunner: with pytest.raises(ValueError): runner.run(app_generate_entity, MagicMock(), MagicMock()) - def test_run_moderation_error_outputs_direct(self, runner, mocker): + def test_run_moderation_error_outputs_direct(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -78,7 +79,7 @@ class TestCompletionAppRunner: runner.direct_output.assert_called_once() runner._handle_invoke_result.assert_not_called() - def test_run_hosting_moderation_stops(self, runner, mocker): + def test_run_hosting_moderation_stops(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -97,7 +98,7 @@ class TestCompletionAppRunner: runner._handle_invoke_result.assert_not_called() - def test_run_dataset_and_external_tools_flow(self, runner, mocker): + def test_run_dataset_and_external_tools_flow(self, runner, mocker: MockerFixture): app_record = 
MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -140,7 +141,7 @@ class TestCompletionAppRunner: assert dataset_retrieval.retrieve.call_args.kwargs["query"] == "query_from_input" runner._handle_invoke_result.assert_called_once() - def test_run_uses_low_image_detail_default(self, runner, mocker): + def test_run_uses_low_image_detail_default(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py index 024bd8f302..353162be8c 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py @@ -1,6 +1,8 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + import core.app.apps.completion.app_config_manager as module from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.completion.app_config_manager import CompletionAppConfigManager @@ -8,7 +10,7 @@ from models.model import AppMode class TestCompletionAppConfigManager: - def test_get_app_config_with_override(self, mocker): + def test_get_app_config_with_override(self, mocker: MockerFixture): app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -35,8 +37,8 @@ class TestCompletionAppConfigManager: assert result.external_data_variables == ["ext1"] assert result.app_mode == AppMode.COMPLETION - def test_get_app_config_without_override_uses_model_config(self, mocker): - app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) + def 
test_get_app_config_without_override_uses_model_config(self, mocker: MockerFixture): + app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -53,7 +55,7 @@ class TestCompletionAppConfigManager: assert result.app_model_config_from == EasyUIBasedAppModelConfigFrom.APP_LATEST_CONFIG assert result.app_model_config_dict == {"model": {"provider": "x"}} - def test_config_validate_filters_related_keys(self, mocker): + def test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {"provider": "x"}, "variables": ["v"], diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py index f2e35f9900..de20dde677 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture import core.app.apps.completion.app_generator as module from core.app.apps.completion.app_generator import CompletionAppGenerator @@ -15,7 +16,7 @@ from services.errors.message import MessageNotExistsError @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = CompletionAppGenerator() mocker.patch.object(module, "copy_current_request_context", side_effect=lambda fn: fn) @@ -69,7 +70,7 @@ class TestCompletionAppGenerator: streaming=False, ) - def test_generate_success_no_file_config(self, generator, mocker): + def test_generate_success_no_file_config(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", 
return_value=app_model_config) mocker.patch.object(module.FileUploadConfigManager, "convert", return_value=None) @@ -99,7 +100,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_not_called() - def test_generate_success_with_files(self, generator, mocker): + def test_generate_success_with_files(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -131,7 +132,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_called_once() - def test_generate_override_model_config_debugger(self, generator, mocker): + def test_generate_override_model_config_debugger(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -165,7 +166,7 @@ class TestCompletionAppGenerator: assert get_app_config.call_args.kwargs["override_config_dict"] == override_config - def test_generate_more_like_this_message_not_found(self, generator, mocker): + def test_generate_more_like_this_message_not_found(self, generator, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -178,7 +179,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_disabled(self, generator, mocker): + def test_generate_more_like_this_disabled(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=False, more_like_this_dict={"enabled": False}) @@ -195,7 +196,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_app_model_config_missing(self, generator, mocker): + def 
test_generate_more_like_this_app_model_config_missing(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = None @@ -212,7 +213,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_message_config_none(self, generator, mocker): + def test_generate_more_like_this_message_config_none(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -229,7 +230,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_success(self, generator, mocker): + def test_generate_more_like_this_success(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -297,7 +298,7 @@ class TestCompletionAppGenerator: (RuntimeError("boom"), True), ], ) - def test_generate_worker_error_handling(self, generator, mocker, error, should_publish): + def test_generate_worker_error_handling(self, generator, mocker: MockerFixture, error, should_publish): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py index 5d4c9bcde0..6c1ee20ffb 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py @@ -1,12 +1,14 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + import core.app.apps.pipeline.pipeline_config_manager as module from core.app.apps.pipeline.pipeline_config_manager import PipelineConfigManager from models.model import AppMode -def 
test_get_pipeline_config(mocker): +def test_get_pipeline_config(mocker: MockerFixture): pipeline = MagicMock(tenant_id="tenant", id="pipe1") workflow = MagicMock(id="wf1") @@ -26,7 +28,7 @@ def test_get_pipeline_config(mocker): assert result.rag_pipeline_variables == ["var1"] -def test_config_validate_filters_related_keys(mocker): +def test_config_validate_filters_related_keys(mocker: MockerFixture): config = { "file_upload": {"enabled": True}, "tts": {"enabled": True}, diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py index c36edf48fc..dd91243a37 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py @@ -3,6 +3,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, PropertyMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_generator as module from core.app.apps.exc import GenerateTaskStoppedError @@ -23,7 +24,7 @@ class FakeRagPipelineGenerateEntity(SimpleNamespace): @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = module.PipelineGenerator() mocker.patch.object(module, "RagPipelineGenerateEntity", FakeRagPipelineGenerateEntity) @@ -88,7 +89,7 @@ class DummySession: return False -def test_generate_dataset_missing(generator, mocker): +def test_generate_dataset_missing(generator, mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -106,7 +107,7 @@ def test_generate_dataset_missing(generator, mocker): ) -def test_generate_debugger_calls_generate(generator, mocker): +def test_generate_debugger_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -150,7 +151,7 @@ def test_generate_debugger_calls_generate(generator, mocker): assert result == {"result": 
"ok"} -def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker): +def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -228,7 +229,7 @@ def test_generate_published_pipeline_creates_documents_and_delay(generator, mock task_proxy.delay.assert_called_once() -def test_generate_is_retry_calls_generate(generator, mocker): +def test_generate_is_retry_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -273,7 +274,7 @@ def test_generate_is_retry_calls_generate(generator, mocker): assert result == {"result": "ok"} -def test_generate_worker_handles_errors(generator, mocker): +def test_generate_worker_handles_errors(generator, mocker: MockerFixture): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -308,7 +309,7 @@ def test_generate_worker_handles_errors(generator, mocker): queue_manager.publish_error.assert_called_once() -def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker): +def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker: MockerFixture): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -341,7 +342,7 @@ def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker assert module.PipelineRunner.call_args.kwargs["system_user_id"] == "session" -def test_generate_raises_when_workflow_not_found(generator, mocker): +def test_generate_raises_when_workflow_not_found(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -369,7 +370,7 @@ def test_generate_raises_when_workflow_not_found(generator, mocker): ) -def 
test_generate_success_returns_converted(generator, mocker): +def test_generate_success_returns_converted(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -409,7 +410,7 @@ def test_generate_success_returns_converted(generator, mocker): assert result == "converted" -def test_single_iteration_generate_validates_inputs(generator, mocker): +def test_single_iteration_generate_validates_inputs(generator, mocker: MockerFixture): with pytest.raises(ValueError): generator.single_iteration_generate(_build_pipeline(), _build_workflow(), "", _build_user(), {}) @@ -419,7 +420,7 @@ def test_single_iteration_generate_validates_inputs(generator, mocker): ) -def test_single_iteration_generate_dataset_required(generator, mocker): +def test_single_iteration_generate_dataset_required(generator, mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -436,7 +437,7 @@ def test_single_iteration_generate_dataset_required(generator, mocker): ) -def test_single_iteration_generate_success(generator, mocker): +def test_single_iteration_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -476,7 +477,7 @@ def test_single_iteration_generate_success(generator, mocker): assert result == {"ok": True} -def test_single_loop_generate_success(generator, mocker): +def test_single_loop_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -516,7 +517,7 @@ def test_single_loop_generate_success(generator, mocker): assert result == {"ok": True} -def test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker): +def test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() app_entity = FakeRagPipelineGenerateEntity(task_id="t") @@ -536,7 +537,7 @@ def 
test_handle_response_value_error_triggers_generate_task_stopped(generator, m ) -def test_build_document_sets_metadata_for_builtin_fields(generator, mocker): +def test_build_document_sets_metadata_for_builtin_fields(generator, mocker: MockerFixture): class DummyDocument(SimpleNamespace): pass @@ -620,7 +621,7 @@ def test_format_datasource_info_list_missing_node_data(generator): ) -def test_format_datasource_info_list_online_drive_folder(generator, mocker): +def test_format_datasource_info_list_online_drive_folder(generator, mocker: MockerFixture): workflow = MagicMock( graph_dict={ "nodes": [ diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py index 9db83f5531..abfc76afa0 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_queue_manager as module from core.app.apps.base_app_queue_manager import PublishFrom @@ -16,7 +17,7 @@ from core.app.entities.queue_entities import ( from graphon.model_runtime.entities.llm_entities import LLMResult -def test_publish_sets_stop_listen_and_raises_on_stopped(mocker): +def test_publish_sets_stop_listen_and_raises_on_stopped(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -28,7 +29,7 @@ def test_publish_sets_stop_listen_and_raises_on_stopped(mocker): manager.stop_listen.assert_called_once() -def test_publish_stop_events_trigger_stop_listen(mocker): +def test_publish_stop_events_trigger_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = 
mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -46,7 +47,7 @@ def test_publish_stop_events_trigger_stop_listen(mocker): manager.stop_listen.assert_called_once() -def test_publish_non_stop_event_no_stop_listen(mocker): +def test_publish_non_stop_event_no_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py index 603062a51c..1eed76cf84 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py @@ -22,6 +22,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_runner as module from core.app.apps.pipeline.pipeline_runner import PipelineRunner @@ -126,7 +127,7 @@ def test_update_document_status_on_failure(mocker, runner): session.commit.assert_called_once() -def test_run_pipeline_not_found(mocker): +def test_run_pipeline_not_found(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.invoke_from = InvokeFrom.WEB_APP app_generate_entity.single_iteration_run = None @@ -150,7 +151,7 @@ def test_run_pipeline_not_found(mocker): runner.run() -def test_run_workflow_not_initialized(mocker): +def test_run_workflow_not_initialized(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") @@ -174,7 +175,7 @@ def test_run_workflow_not_initialized(mocker): runner.run() -def test_run_single_iteration_path(mocker): +def test_run_single_iteration_path(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.single_iteration_run = 
MagicMock() @@ -223,7 +224,7 @@ def test_run_single_iteration_path(mocker): runner._handle_event.assert_called() -def test_run_normal_path_builds_graph(mocker): +def test_run_normal_path_builds_graph(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") diff --git a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py index f48a7fb38e..835c9a8576 100644 --- a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py @@ -45,7 +45,7 @@ def _make_generate_entity(app_config: WorkflowUIBasedAppConfig) -> AdvancedChatA @pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -108,7 +108,7 @@ def test_init_generate_records_marks_existing_conversation(): assert entity.is_new_conversation is False -def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch): +def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch: pytest.MonkeyPatch): app_config = _make_app_config() entity = _make_generate_entity(app_config) entity.conversation_id = "existing-conversation-id" diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py index b0f8b423e1..f2a1700664 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py @@ -369,7 +369,7 @@ def test_validate_inputs_optional_file_with_empty_string_ignores_default(): class TestBaseAppGeneratorExtras: - def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch): + def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch: pytest.MonkeyPatch): base_app_generator = BaseAppGenerator() 
variables = [ diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py index 17de39ca99..c6eedf7be7 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py @@ -42,7 +42,7 @@ class _QueueRecorder: class TestAppRunner: - def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch): + def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -65,7 +65,7 @@ class TestAppRunner: assert model_config.parameters["max_tokens"] == 20 - def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, monkeypatch): + def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -86,7 +86,7 @@ class TestAppRunner: assert runner.recalc_llm_max_tokens(model_config, prompt_messages=[]) == -1 - def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch): + def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() app_generate_entity = SimpleNamespace(model_conf=SimpleNamespace(model="mock"), stream=True) @@ -133,7 +133,7 @@ class TestAppRunner: stream=True, ) - def test_organize_prompt_messages_simple_template(self, monkeypatch): + def test_organize_prompt_messages_simple_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="chat", stop=["STOP"]) prompt_template_entity = PromptTemplateEntity( @@ -158,7 +158,7 @@ class TestAppRunner: assert prompt_messages == ["simple-message"] assert stop == ["simple-stop"] - def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch: 
pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="completion", stop=[""]) captured: dict[str, object] = {} @@ -191,7 +191,7 @@ class TestAppRunner: assert memory_config.role_prefix.user == "U" assert memory_config.role_prefix.assistant == "A" - def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="chat", stop=[""]) captured: dict[str, object] = {} @@ -245,7 +245,7 @@ class TestAppRunner: files=[], ) - def test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch): + def test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() warning_logger = MagicMock() @@ -284,7 +284,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.message.content == "abc" warning_logger.assert_called_once() - def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch): + def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() exception_logger = MagicMock() @@ -331,7 +331,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.usage == usage exception_logger.assert_called_once() - def test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch): + def test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() class _ToggleBool: @@ -367,7 +367,7 @@ class TestAppRunner: db_session.add.assert_not_called() queue_manager.publish.assert_not_called() - def test_check_hosting_moderation_direct_output_called(self, monkeypatch): + def test_check_hosting_moderation_direct_output_called(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = 
_QueueRecorder() app_generate_entity = SimpleNamespace(stream=False) @@ -388,7 +388,7 @@ class TestAppRunner: assert result is True assert direct_output.called - def test_fill_in_inputs_from_external_data_tools(self, monkeypatch): + def test_fill_in_inputs_from_external_data_tools(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.ExternalDataFetch.fetch", @@ -405,7 +405,7 @@ class TestAppRunner: assert result == {"foo": "bar"} - def test_moderation_for_inputs_returns_result(self, monkeypatch): + def test_moderation_for_inputs_returns_result(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.InputModeration.check", @@ -424,7 +424,7 @@ class TestAppRunner: assert result == (True, {}, "") - def test_query_app_annotations_to_reply(self, monkeypatch): + def test_query_app_annotations_to_reply(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.AnnotationReplyFeature.query", diff --git a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py index 1250ac5ecf..6a9b5e7619 100644 --- a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py @@ -85,7 +85,7 @@ def _make_chat_generate_entity(app_config: EasyUIBasedAppConfig) -> ChatAppGener @pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -130,7 +130,7 @@ def test_init_generate_records_sets_conversation_fields_for_chat_entity(): class TestMessageBasedAppGeneratorExtras: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): 
generator = MessageBasedAppGenerator() class _Pipeline: @@ -155,7 +155,7 @@ class TestMessageBasedAppGeneratorExtras: stream=False, ) - def test_get_app_model_config_requires_valid_config(self, monkeypatch): + def test_get_app_model_config_requires_valid_config(self, monkeypatch: pytest.MonkeyPatch): generator = MessageBasedAppGenerator() app_model = SimpleNamespace(id="app", app_model_config_id=None, app_model_config=None) diff --git a/api/tests/unit_tests/core/app/apps/test_pause_resume.py b/api/tests/unit_tests/core/app/apps/test_pause_resume.py index 6104b8d6ca..aa71f4d9c4 100644 --- a/api/tests/unit_tests/core/app/apps/test_pause_resume.py +++ b/api/tests/unit_tests/core/app/apps/test_pause_resume.py @@ -3,6 +3,8 @@ import time from types import ModuleType, SimpleNamespace from typing import Any +from pytest_mock import MockerFixture + import graphon.nodes.human_input.entities # noqa: F401 from core.app.apps.advanced_chat import app_generator as adv_app_gen_module from core.app.apps.workflow import app_generator as wf_app_gen_module @@ -101,7 +103,7 @@ class _StubToolNode(Node[_StubToolNodeData]): yield self._convert_node_run_result_to_graph_node_event(result) -def _patch_tool_node(mocker): +def _patch_tool_node(mocker: MockerFixture): original_resolve_node_class = node_factory_module.resolve_workflow_node_class def _patched_resolve_node_class(*, node_type: NodeType, node_version: str) -> type[Node]: @@ -196,7 +198,7 @@ def _node_successes(events: list[GraphEngineEvent]) -> list[str]: return [evt.node_id for evt in events if isinstance(evt, NodeRunSucceededEvent)] -def test_workflow_app_pause_resume_matches_baseline(mocker): +def test_workflow_app_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("baseline") @@ -236,7 +238,7 @@ def test_workflow_app_pause_resume_matches_baseline(mocker): assert resumed_state.outputs == baseline_outputs -def 
test_advanced_chat_pause_resume_matches_baseline(mocker): +def test_advanced_chat_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("adv-baseline") diff --git a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py index 58f0e47a4b..12f3ed9f07 100644 --- a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py +++ b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py @@ -54,7 +54,7 @@ class FakeTopic: return self._state["subscribed"] -def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch): +def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch: pytest.MonkeyPatch): topic = FakeTopic() def fake_get_response_topic(cls, app_mode, workflow_run_id): @@ -92,7 +92,7 @@ def test_normalize_terminal_events_empty_values(): assert _normalize_terminal_events([]) == set({}) -def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch): +def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch: pytest.MonkeyPatch): topic = FakeTopic() times = [1000.0, 1000.0, 1001.0, 1001.0, 1002.0] diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py index 7e8367c6c4..0e9f8b6f35 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py @@ -1,6 +1,8 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + from core.app.apps.workflow.app_generator import SKIP_PREPARE_USER_INPUTS_KEY, WorkflowAppGenerator @@ -22,7 +24,7 @@ def test_should_prepare_user_inputs_keeps_validation_when_flag_false(): assert WorkflowAppGenerator()._should_prepare_user_inputs(args) -def test_resume_delegates_to_generate(mocker): +def test_resume_delegates_to_generate(mocker: 
MockerFixture): generator = WorkflowAppGenerator() mock_generate = mocker.patch.object(generator, "_generate", return_value="ok") @@ -52,7 +54,7 @@ def test_resume_delegates_to_generate(mocker): assert kwargs["invoke_from"] == "debugger" -def test_generate_appends_pause_layer_and_forwards_state(mocker): +def test_generate_appends_pause_layer_and_forwards_state(mocker: MockerFixture): generator = WorkflowAppGenerator() mock_queue_manager = MagicMock() @@ -124,7 +126,7 @@ def test_generate_appends_pause_layer_and_forwards_state(mocker): assert worker_kwargs["kwargs"]["graph_runtime_state"] is graph_runtime_state -def test_resume_path_runs_worker_with_runtime_state(mocker): +def test_resume_path_runs_worker_with_runtime_state(mocker: MockerFixture): generator = WorkflowAppGenerator() runtime_state = MagicMock(name="runtime-state") diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py index 58c7bfa4bc..dbe846cbc5 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py @@ -9,6 +9,7 @@ from core.app.apps.workflow_app_runner import WorkflowBasedAppRunner from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.app.entities.queue_entities import ( QueueAgentLogEvent, + QueueHumanInputFormFilledEvent, QueueIterationCompletedEvent, QueueLoopCompletedEvent, QueueNodeExceptionEvent, @@ -30,6 +31,7 @@ from graphon.graph_events import ( NodeRunAgentLogEvent, NodeRunExceptionEvent, NodeRunFailedEvent, + NodeRunHumanInputFormFilledEvent, NodeRunIterationSucceededEvent, NodeRunLoopFailedEvent, NodeRunRetryEvent, @@ -39,6 +41,7 @@ from graphon.graph_events import ( ) from graphon.node_events import NodeRunResult from graphon.runtime import GraphRuntimeState, VariablePool +from graphon.variables.segments import StringSegment from graphon.variables.variables import 
StringVariable @@ -90,7 +93,7 @@ class TestWorkflowBasedAppRunner: with pytest.raises(ValueError, match="Neither single_iteration_run nor single_loop_run"): runner._prepare_single_node_execution(workflow, None, None, user_id="00000000-0000-0000-0000-000000000001") - def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch): + def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch: pytest.MonkeyPatch): runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app") graph_runtime_state = GraphRuntimeState( variable_pool=VariablePool(system_variables=default_system_variables()), @@ -142,7 +145,9 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool is graph_runtime_state.variable_pool - def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init(self, monkeypatch): + def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init( + self, monkeypatch: pytest.MonkeyPatch + ): variable_loader = SimpleNamespace( load_variables=lambda selectors: ( [ @@ -232,7 +237,7 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool.get(["sys", "conversation_id"]).value == "conv-1" - def test_handle_graph_run_events_and_pause_notifications(self, monkeypatch): + def test_handle_graph_run_events_and_pause_notifications(self, monkeypatch: pytest.MonkeyPatch): published: list[object] = [] class _QueueManager: @@ -361,6 +366,38 @@ class TestWorkflowBasedAppRunner: assert any(isinstance(event, QueueIterationCompletedEvent) for event in published) assert any(isinstance(event, QueueLoopCompletedEvent) for event in published) + def test_handle_human_input_form_filled_event_preserves_submitted_data(self): + published: list[object] = [] + + class _QueueManager: + def publish(self, event, publish_from): + published.append(event) + + runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") + graph_runtime_state = 
GraphRuntimeState( + variable_pool=VariablePool(system_variables=default_system_variables()), + start_at=0.0, + ) + workflow_entry = SimpleNamespace(graph_engine=SimpleNamespace(graph_runtime_state=graph_runtime_state)) + + runner._handle_event( + workflow_entry, + NodeRunHumanInputFormFilledEvent( + id="exec", + node_id="node", + node_type=BuiltinNodeTypes.HUMAN_INPUT, + node_title="Human Input", + rendered_content="content", + action_id="approve", + action_text="Approve", + submitted_data={"decision": StringSegment(value="approve")}, + ), + ) + + queue_event = published[-1] + assert isinstance(queue_event, QueueHumanInputFormFilledEvent) + assert queue_event.submitted_data == {"decision": StringSegment(value="approve")} + @pytest.mark.parametrize( ("event_factory", "queue_event_cls"), [ diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_pause_events.py b/api/tests/unit_tests/core/app/apps/test_workflow_pause_events.py index a3ab379b66..61053a3784 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_pause_events.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_pause_events.py @@ -14,8 +14,7 @@ from core.workflow.system_variables import build_system_variables from graphon.entities import WorkflowStartReason from graphon.entities.pause_reason import HumanInputRequired from graphon.graph_events import GraphRunPausedEvent -from graphon.nodes.human_input.entities import FormInput, UserAction -from graphon.nodes.human_input.enums import FormInputType +from graphon.nodes.human_input.entities import ParagraphInputConfig, UserActionConfig from models.account import Account from models.human_input import RecipientType @@ -156,10 +155,8 @@ def test_queue_workflow_paused_event_to_stream_responses(monkeypatch: pytest.Mon reason = HumanInputRequired( form_id="form-1", form_content="Rendered", - inputs=[ - FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="field", default=None), - ], - actions=[UserAction(id="approve", title="Approve")], 
+ inputs=[ParagraphInputConfig(output_variable_name="field")], + actions=[UserActionConfig(id="approve", title="Approve")], node_id="node-id", node_title="Human Step", ) diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py index 09ad078a70..320189143e 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py @@ -67,7 +67,7 @@ class TestWorkflowAppGeneratorValidation: class TestWorkflowAppGeneratorHandleResponse: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -116,7 +116,7 @@ class TestWorkflowAppGeneratorHandleResponse: class TestWorkflowAppGeneratorGenerate: - def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch): + def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py index 0bcc1029b0..1311d5e9cb 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py @@ -187,7 +187,7 @@ class TestWorkflowGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( 
variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -408,7 +408,7 @@ class TestWorkflowGenerateTaskPipeline: assert list(pipeline._handle_human_input_form_timeout_event(timeout_event)) == ["timeout"] assert list(pipeline._handle_agent_log_event(agent_event)) == ["log"] - def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch): + def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -560,7 +560,7 @@ class TestWorkflowGenerateTaskPipeline: responses = list(pipeline._wrapper_process_stream_response()) assert responses == [PingStreamResponse(task_id="task")] - def test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch): + def test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -597,7 +597,7 @@ class TestWorkflowGenerateTaskPipeline: assert sleep_spy assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch): + def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -633,7 +633,7 @@ class TestWorkflowGenerateTaskPipeline: assert logger_exception assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_database_session_rolls_back_on_error(self, monkeypatch): + def test_database_session_rolls_back_on_error(self, 
monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() calls = {"enter": 0, "exit_exc": None} diff --git a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py index a20d89d807..f10e0084d0 100644 --- a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py @@ -143,7 +143,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert pipeline._listen_audio_msg(publisher=None, task_id="task") is None - def test_process_stream_response_handles_chunks_and_end(self, monkeypatch): + def test_process_stream_response_handles_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -245,7 +245,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(event, QueueLLMChunkEvent) for event in events) assert any(isinstance(event, QueueStopEvent) for event in events) - def test_handle_stop_updates_usage(self, monkeypatch): + def test_handle_stop_updates_usage(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -313,7 +313,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert pipeline._task_state.llm_result.usage.prompt_tokens == 10 assert pipeline._task_state.llm_result.usage.completion_tokens == 5 - def test_record_files_builds_file_payloads(self, monkeypatch): + def test_record_files_builds_file_payloads(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -405,7 +405,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert files assert 
len(files) == 3 - def test_process_stream_response_handles_annotation_and_error(self, monkeypatch): + def test_process_stream_response_handles_annotation_and_error(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -472,7 +472,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert isinstance(responses[-1], ValueError) assert pipeline._task_state.llm_result.message.content == "annotatedagent" - def test_agent_thought_to_stream_response_returns_payload(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_payload(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -681,7 +681,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses == ["payload"] - def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch): + def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -715,7 +715,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses[1] == "payload" assert isinstance(responses[-1], MessageAudioEndStreamResponse) - def test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch): + def test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -756,7 +756,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(item, MessageAudioStreamResponse) for item in responses) assert 
isinstance(responses[-1], MessageAudioEndStreamResponse) - def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch): + def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -896,7 +896,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert list(pipeline._process_stream_response(publisher=None)) == [] - def test_save_message_persists_fields_and_emits_trace(self, monkeypatch): + def test_save_message_persists_fields_and_emits_trace(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -981,7 +981,7 @@ class TestEasyUiBasedGenerateTaskPipeline: with pytest.raises(ValueError, match="Conversation conv not found"): pipeline._save_message(session=session) - def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch): + def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1021,7 +1021,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == "msg" assert response.metadata["usage"]["prompt_tokens"] == 1 - def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch): + def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1059,7 +1059,7 @@ class 
TestEasyUiBasedGenerateTaskPipeline: assert response.files is None - def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch): + def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1155,7 +1155,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == "msg" assert response.answer == "hello" - def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( diff --git a/api/tests/unit_tests/core/app/workflow/test_node_factory.py b/api/tests/unit_tests/core/app/workflow/test_node_factory.py index 30a068f4c5..7c9f174129 100644 --- a/api/tests/unit_tests/core/app/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/app/workflow/test_node_factory.py @@ -46,7 +46,7 @@ class TestDifyNodeFactory: lambda **_kwargs: node_class, ) - def _factory(self, monkeypatch): + def _factory(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_STRING_LENGTH", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_NUMBER", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MIN_NUMBER", -10) @@ -72,20 +72,20 @@ class TestDifyNodeFactory: graph_runtime_state=SimpleNamespace(), ) - def test_create_node_unknown_type(self, monkeypatch): + def test_create_node_unknown_type(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": 
{"type": "unknown"}}) - def test_create_node_missing_mapping(self, monkeypatch): + def test_create_node_missing_mapping(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) monkeypatch.setattr("core.workflow.node_factory.get_node_type_classes_mapping", lambda: {}) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_missing_latest_class(self, monkeypatch): + def test_create_node_missing_latest_class(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) monkeypatch.setattr( "core.workflow.node_factory.get_node_type_classes_mapping", @@ -96,7 +96,7 @@ class TestDifyNodeFactory: with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_selects_versioned_class(self, monkeypatch): + def test_create_node_selects_versioned_class(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) selected_versions: list[tuple[str, str]] = [] @@ -115,7 +115,7 @@ class TestDifyNodeFactory: assert node.id == "node-1" assert selected_versions == [("snapshot", "called")] - def test_create_node_code_branch(self, monkeypatch): + def test_create_node_code_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyCodeNode) @@ -124,7 +124,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyCodeNode) assert node.id == "node-1" - def test_create_node_template_transform_branch(self, monkeypatch): + def test_create_node_template_transform_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyTemplateTransformNode) @@ -133,7 +133,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyTemplateTransformNode) assert "jinja2_template_renderer" in node.kwargs - def test_create_node_http_request_branch(self, monkeypatch): + 
def test_create_node_http_request_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyHttpRequestNode) @@ -142,7 +142,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyHttpRequestNode) assert "http_request_config" in node.kwargs - def test_create_node_knowledge_retrieval_branch(self, monkeypatch): + def test_create_node_knowledge_retrieval_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyKnowledgeRetrievalNode) @@ -151,7 +151,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyKnowledgeRetrievalNode) assert node.kwargs == {} - def test_create_node_document_extractor_branch(self, monkeypatch): + def test_create_node_document_extractor_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyDocumentExtractorNode) diff --git a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py index 82552470a9..04ce524904 100644 --- a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py +++ b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py @@ -2,12 +2,14 @@ from __future__ import annotations from types import SimpleNamespace +import pytest + from core.app.workflow.layers.observability import ObservabilityLayer from graphon.enums import BuiltinNodeTypes class TestObservabilityLayerExtras: - def test_init_tracer_enabled_sets_tracer(self, monkeypatch): + def test_init_tracer_enabled_sets_tracer(self, monkeypatch: pytest.MonkeyPatch): tracer = object() monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -18,7 +20,7 @@ class TestObservabilityLayerExtras: assert 
layer._is_disabled is False assert layer._tracer is tracer - def test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch, caplog): + def test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch: pytest.MonkeyPatch, caplog): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -33,7 +35,7 @@ class TestObservabilityLayerExtras: assert layer._tracer is None assert "Failed to get OpenTelemetry tracer" in caplog.text - def test_init_tracer_disables_when_otel_disabled(self, monkeypatch): + def test_init_tracer_disables_when_otel_disabled(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", False) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -143,7 +145,7 @@ class TestObservabilityLayerExtras: assert layer._node_contexts == {} - def test_on_node_run_end_calls_span_end(self, monkeypatch): + def test_on_node_run_end_calls_span_end(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False ended: list[str] = [] @@ -164,7 +166,7 @@ class TestObservabilityLayerExtras: assert ended == ["ended"] assert "exec" not in layer._node_contexts - def test_on_node_run_end_logs_detach_failure(self, monkeypatch, caplog): + def test_on_node_run_end_logs_detach_failure(self, monkeypatch: pytest.MonkeyPatch, caplog): layer = ObservabilityLayer() layer._is_disabled = False @@ -186,7 +188,7 @@ class TestObservabilityLayerExtras: assert "Failed to detach OpenTelemetry token" in caplog.text assert "exec" not in layer._node_contexts - def test_on_node_run_start_and_end_creates_span(self, monkeypatch): + def test_on_node_run_start_and_end_creates_span(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False diff --git 
a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py index cacb4dd4fa..23fe682017 100644 --- a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py +++ b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py @@ -120,7 +120,7 @@ class TestWorkflowPersistenceLayer: with pytest.raises(ValueError, match="workflow_execution_id must be provided"): layer._get_execution_id() - def test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch): + def test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch: pytest.MonkeyPatch): layer, _, _, _ = _make_layer() monkeypatch.setattr( diff --git a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py index 7b433ab57b..1125ce6dbc 100644 --- a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py +++ b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py @@ -3,6 +3,7 @@ import queue from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.base.tts.app_generator_tts_publisher import ( AppGeneratorTTSPublisher, @@ -17,7 +18,7 @@ from core.base.tts.app_generator_tts_publisher import ( @pytest.fixture -def mock_model_instance(mocker): +def mock_model_instance(mocker: MockerFixture): model = mocker.MagicMock() model.invoke_tts.return_value = [b"audio1", b"audio2"] model.get_tts_voices.return_value = [{"value": "voice1"}, {"value": "voice2"}] @@ -33,7 +34,7 @@ def mock_model_manager(mocker, mock_model_instance): @pytest.fixture(autouse=True) -def patch_threads(mocker): +def patch_threads(mocker: MockerFixture): """Prevent real threads from starting during tests""" mocker.patch("threading.Thread.start", return_value=None) @@ -114,7 +115,7 @@ class TestProcessFuture: finish = audio_queue.get() assert finish.status == "finish" - def 
test_process_future_exception(self, mocker): + def test_process_future_exception(self, mocker: MockerFixture): future_queue = queue.Queue() audio_queue = queue.Queue() @@ -222,7 +223,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker): + def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -297,7 +298,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -332,7 +333,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "Hello " - def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -358,7 +359,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "" - def test_runtime_resets_msg_text_when_text_tmp_not_str(self, mock_model_manager, mocker): + def test_runtime_resets_msg_text_when_text_tmp_not_str(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() diff --git a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py index 4c1aa33540..f9b3b1864e 100644 --- 
a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py @@ -1,8 +1,10 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.callback_handler.agent_tool_callback_handler as module +from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler # ----------------------------- # Fixtures @@ -10,17 +12,17 @@ import core.callback_handler.agent_tool_callback_handler as module @pytest.fixture -def enable_debug(mocker): +def enable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", True) @pytest.fixture -def disable_debug(mocker): +def disable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", False) @pytest.fixture -def mock_print(mocker): +def mock_print(mocker: MockerFixture): return mocker.patch("builtins.print") @@ -71,7 +73,7 @@ class TestPrintText: module.print_text("hello") mock_print.assert_called_once_with("hello", end="", file=None) - def test_print_text_with_color(self, mocker, mock_print): + def test_print_text_with_color(self, mocker: MockerFixture, mock_print): mock_get_color = mocker.patch( "core.callback_handler.agent_tool_callback_handler.get_colored_text", return_value="colored_text", @@ -82,7 +84,7 @@ class TestPrintText: mock_get_color.assert_called_once_with("hello", "green") mock_print.assert_called_once_with("colored_text", end="", file=None) - def test_print_text_with_file_flush(self, mocker): + def test_print_text_with_file_flush(self, mocker: MockerFixture): mock_file = MagicMock() mock_print = mocker.patch("builtins.print") @@ -107,21 +109,25 @@ class TestDifyAgentCallbackHandler: assert handler.color == "green" assert handler.current_loop == 1 - def test_on_tool_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_start_debug_enabled(self, handler: DifyAgentCallbackHandler, 
enable_debug, mocker: MockerFixture): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_called() - def test_on_tool_start_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_start_debug_disabled( + self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_not_called() - def test_on_tool_end_debug_enabled_and_trace(self, handler, enable_debug, mocker): + def test_on_tool_end_debug_enabled_and_trace( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") mock_trace_manager = MagicMock() @@ -137,7 +143,9 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 mock_trace_manager.add_trace_task.assert_called_once() - def test_on_tool_end_without_trace_manager(self, handler, enable_debug, mocker): + def test_on_tool_end_without_trace_manager( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_end( @@ -148,14 +156,16 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 - def test_on_tool_error_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_error_debug_enabled(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) mock_print_text.assert_called_once() - def test_on_tool_error_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_error_debug_disabled( + 
self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) @@ -163,14 +173,16 @@ class TestDifyAgentCallbackHandler: mock_print_text.assert_not_called() @pytest.mark.parametrize("thought", ["thinking", ""]) - def test_on_agent_start(self, handler, enable_debug, mocker, thought): + def test_on_agent_start(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture, thought): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_agent_start(thought) mock_print_text.assert_called() - def test_on_agent_finish_increments_loop(self, handler, enable_debug, mocker): + def test_on_agent_finish_increments_loop( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") current_loop = handler.current_loop @@ -179,19 +191,21 @@ class TestDifyAgentCallbackHandler: assert handler.current_loop == current_loop + 1 mock_print_text.assert_called() - def test_on_datasource_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_datasource_start_debug_enabled( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_datasource_start("ds1", {"x": 1}) mock_print_text.assert_called_once() - def test_ignore_agent_property(self, disable_debug, handler): + def test_ignore_agent_property(self, disable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_agent is True - def test_ignore_chat_model_property(self, disable_debug, handler): + def test_ignore_chat_model_property(self, disable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_chat_model is True - def 
test_ignore_properties_when_debug_enabled(self, enable_debug, handler): + def test_ignore_properties_when_debug_enabled(self, enable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_agent is False assert handler.ignore_chat_model is False diff --git a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py index 8e5670e9be..f23669c3c7 100644 --- a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom from core.callback_handler.index_tool_callback_handler import ( @@ -7,12 +8,12 @@ from core.callback_handler.index_tool_callback_handler import ( @pytest.fixture -def mock_queue_manager(mocker): +def mock_queue_manager(mocker: MockerFixture): return mocker.Mock() @pytest.fixture -def handler(mock_queue_manager, mocker): +def handler(mock_queue_manager, mocker: MockerFixture): mocker.patch( "core.callback_handler.index_tool_callback_handler.db", ) @@ -34,7 +35,7 @@ class TestOnQuery: (InvokeFrom.WEB_APP, "end_user"), ], ) - def test_on_query_success_roles(self, mocker, mock_queue_manager, invoke_from, expected_role): + def test_on_query_success_roles(self, mocker: MockerFixture, mock_queue_manager, invoke_from, expected_role): # Arrange mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") @@ -57,7 +58,7 @@ class TestOnQuery: assert dataset_query.created_by_role == expected_role mock_db.session.commit.assert_called_once() - def test_on_query_none_values(self, mocker, mock_queue_manager): + def test_on_query_none_values(self, mocker: MockerFixture, mock_queue_manager): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") handler = DatasetIndexToolCallbackHandler( @@ 
-75,7 +76,7 @@ class TestOnQuery: class TestOnToolEnd: - def test_on_tool_end_no_metadata(self, handler, mocker): + def test_on_tool_end_no_metadata(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") document = mocker.Mock() @@ -85,7 +86,9 @@ class TestOnToolEnd: mock_db.session.commit.assert_not_called() - def test_on_tool_end_dataset_document_not_found(self, handler, mocker): + def test_on_tool_end_dataset_document_not_found( + self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_db.session.scalar.return_value = None @@ -96,7 +99,9 @@ class TestOnToolEnd: mock_db.session.scalar.assert_called_once() - def test_on_tool_end_parent_child_index_with_child(self, handler, mocker): + def test_on_tool_end_parent_child_index_with_child( + self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -119,7 +124,7 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() mock_db.session.commit.assert_called_once() - def test_on_tool_end_non_parent_child_index(self, handler, mocker): + def test_on_tool_end_non_parent_child_index(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -139,12 +144,12 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() mock_db.session.commit.assert_called_once() - def test_on_tool_end_empty_documents(self, handler): + def test_on_tool_end_empty_documents(self, handler: DatasetIndexToolCallbackHandler): handler.on_tool_end([]) class TestReturnRetrieverResourceInfo: - def test_publish_called(self, handler, mock_queue_manager, mocker): + def 
test_publish_called(self, handler: DatasetIndexToolCallbackHandler, mock_queue_manager, mocker: MockerFixture): mock_event = mocker.patch("core.callback_handler.index_tool_callback_handler.QueueRetrieverResourcesEvent") resources = [mocker.Mock()] diff --git a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py index 131fb006ed..5b53c5965c 100644 --- a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, call import pytest +from pytest_mock import MockerFixture from core.callback_handler.workflow_tool_callback_handler import ( DifyWorkflowCallbackHandler, @@ -26,13 +27,13 @@ def handler(): @pytest.fixture -def mock_print_text(mocker): +def mock_print_text(mocker: MockerFixture): """Mock print_text to avoid real stdout printing.""" return mocker.patch("core.callback_handler.workflow_tool_callback_handler.print_text") class TestDifyWorkflowCallbackHandler: - def test_on_tool_execution_single_output_success(self, handler, mock_print_text): + def test_on_tool_execution_single_output_success(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "test_tool" tool_inputs = {"a": 1} @@ -62,7 +63,7 @@ class TestDifyWorkflowCallbackHandler: ] ) - def test_on_tool_execution_multiple_outputs(self, handler, mock_print_text): + def test_on_tool_execution_multiple_outputs(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "multi_tool" outputs = [ @@ -83,7 +84,7 @@ class TestDifyWorkflowCallbackHandler: assert results == outputs assert mock_print_text.call_count == 4 * len(outputs) - def test_on_tool_execution_empty_iterable(self, handler, mock_print_text): + def test_on_tool_execution_empty_iterable(self, handler: 
DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "empty_tool" @@ -108,7 +109,9 @@ class TestDifyWorkflowCallbackHandler: ("not_iterable", AttributeError), ], ) - def test_on_tool_execution_invalid_outputs_type(self, handler, invalid_outputs, expected_exception): + def test_on_tool_execution_invalid_outputs_type( + self, handler: DifyWorkflowCallbackHandler, invalid_outputs, expected_exception + ): # Arrange tool_name = "invalid_tool" @@ -122,7 +125,7 @@ class TestDifyWorkflowCallbackHandler: ) ) - def test_on_tool_execution_long_json_truncation(self, handler, mock_print_text): + def test_on_tool_execution_long_json_truncation(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "long_json_tool" long_json = "x" * 1500 @@ -144,7 +147,7 @@ class TestDifyWorkflowCallbackHandler: color="blue", ) - def test_on_tool_execution_model_dump_json_exception(self, handler, mock_print_text): + def test_on_tool_execution_model_dump_json_exception(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "exception_tool" bad_message = MagicMock() @@ -163,7 +166,9 @@ class TestDifyWorkflowCallbackHandler: # Ensure first two prints happened before failure assert mock_print_text.call_count >= 2 - def test_on_tool_execution_none_message_id_and_trace_manager(self, handler, mock_print_text): + def test_on_tool_execution_none_message_id_and_trace_manager( + self, handler: DifyWorkflowCallbackHandler, mock_print_text + ): # Arrange tool_name = "optional_params_tool" message = DummyToolInvokeMessage('{"data": "ok"}') diff --git a/api/tests/unit_tests/core/datasource/test_datasource_manager.py b/api/tests/unit_tests/core/datasource/test_datasource_manager.py index deeac49bbc..8842d678c7 100644 --- a/api/tests/unit_tests/core/datasource/test_datasource_manager.py +++ b/api/tests/unit_tests/core/datasource/test_datasource_manager.py @@ -2,6 +2,7 @@ import types from collections.abc import Generator import pytest +from 
pytest_mock import MockerFixture from contexts.wrapper import RecyclableContextVar from core.datasource.datasource_manager import DatasourceManager @@ -37,7 +38,7 @@ def _invalidate_recyclable_contextvars() -> None: RecyclableContextVar.increment_thread_recycles() -def test_get_icon_url_calls_runtime(mocker): +def test_get_icon_url_calls_runtime(mocker: MockerFixture): fake_runtime = mocker.Mock() fake_runtime.get_icon_url.return_value = "https://icon" mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=fake_runtime) @@ -52,7 +53,7 @@ def test_get_icon_url_calls_runtime(mocker): DatasourceManager.get_datasource_runtime.assert_called_once() -def test_get_datasource_runtime_delegates_to_provider_controller(mocker): +def test_get_datasource_runtime_delegates_to_provider_controller(mocker: MockerFixture): provider_controller = mocker.Mock() provider_controller.get_datasource.return_value = object() mocker.patch.object(DatasourceManager, "get_datasource_plugin_provider", return_value=provider_controller) @@ -114,7 +115,7 @@ def test_get_datasource_plugin_provider_creates_controller_and_caches(mocker, da assert ctrl_cls.call_count == 1 -def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker): +def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker: MockerFixture): _invalidate_recyclable_contextvars() mocker.patch( "core.datasource.datasource_manager.PluginDatasourceManager.fetch_datasource_provider", @@ -129,7 +130,7 @@ def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mock ) -def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): +def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -145,7 +146,7 @@ def 
test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): ) -def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): +def test_get_datasource_plugin_provider_raises_when_controller_none(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -165,7 +166,7 @@ def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): ) -def test_stream_online_results_yields_messages_online_document(mocker): +def test_stream_online_results_yields_messages_online_document(mocker: MockerFixture): # stub runtime to yield a text message def _doc_messages(**_): yield from _gen_messages_text_only("hello") @@ -195,7 +196,7 @@ def test_stream_online_results_yields_messages_online_document(mocker): assert msgs[0].message.text == "hello" -def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker): +def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -229,7 +230,7 @@ def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_do assert final_value == {} -def test_stream_online_results_raises_when_missing_params(mocker): +def test_stream_online_results_raises_when_missing_params(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -279,7 +280,7 @@ def test_stream_online_results_raises_when_missing_params(mocker): ) -def test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker): +def test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -313,7 +314,7 
@@ def test_stream_online_results_yields_messages_and_returns_empty_dict_online_dri assert final_value == {} -def test_stream_online_results_raises_for_unsupported_stream_type(mocker): +def test_stream_online_results_raises_for_unsupported_stream_type(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=mocker.Mock()) mocker.patch( "core.datasource.datasource_manager.DatasourceProviderService.get_datasource_credentials", @@ -337,7 +338,7 @@ def test_stream_online_results_raises_for_unsupported_stream_type(mocker): ) -def test_stream_node_events_emits_events_online_document(mocker): +def test_stream_node_events_emits_events_online_document(mocker: MockerFixture): # make manager's low-level stream produce TEXT only mocker.patch.object( DatasourceManager, @@ -370,7 +371,7 @@ def test_stream_node_events_emits_events_online_document(mocker): assert events[-1].node_run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED -def test_stream_node_events_builds_file_and_variables_from_messages(mocker): +def test_stream_node_events_builds_file_and_variables_from_messages(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -478,7 +479,7 @@ def test_stream_node_events_builds_file_and_variables_from_messages(mocker): assert events[-1].node_run_result.outputs["x"] == 1 -def test_stream_node_events_raises_when_toolfile_missing(mocker): +def test_stream_node_events_raises_when_toolfile_missing(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -526,7 +527,7 @@ def test_stream_node_events_raises_when_toolfile_missing(mocker): ) -def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker): +def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker: 
MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) file_in = File( @@ -580,7 +581,7 @@ def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(moc assert completed.node_run_result.outputs["datasource_type"] == DatasourceProviderType.ONLINE_DRIVE -def test_stream_node_events_skips_file_build_for_non_online_types(mocker): +def test_stream_node_events_skips_file_build_for_non_online_types(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -620,7 +621,7 @@ def test_stream_node_events_skips_file_build_for_non_online_types(mocker): assert events[-1].node_run_result.outputs["file"] is None -def test_get_upload_file_by_id_builds_file(mocker): +def test_get_upload_file_by_id_builds_file(mocker: MockerFixture): # fake UploadFile row fake_row = types.SimpleNamespace( id="fid", @@ -654,7 +655,7 @@ def test_get_upload_file_by_id_builds_file(mocker): assert f.storage_key == "k" -def test_get_upload_file_by_id_raises_when_missing(mocker): +def test_get_upload_file_by_id_raises_when_missing(mocker: MockerFixture): class _S: def __enter__(self): return self diff --git a/api/tests/unit_tests/core/entities/test_entities_execution_extra_content.py b/api/tests/unit_tests/core/entities/test_entities_execution_extra_content.py index ef8f360dbf..d0849e7b88 100644 --- a/api/tests/unit_tests/core/entities/test_entities_execution_extra_content.py +++ b/api/tests/unit_tests/core/entities/test_entities_execution_extra_content.py @@ -4,8 +4,7 @@ from core.entities.execution_extra_content import ( HumanInputFormDefinition, HumanInputFormSubmissionData, ) -from graphon.nodes.human_input.entities import FormInput, UserAction -from graphon.nodes.human_input.enums import FormInputType +from graphon.nodes.human_input.entities import ParagraphInputConfig, UserActionConfig from 
models.execution_extra_content import ExecutionContentType @@ -16,8 +15,8 @@ def test_human_input_content_defaults_and_domain_alias() -> None: node_id="node-1", node_title="Human Input", form_content="Please confirm", - inputs=[FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="answer")], - actions=[UserAction(id="confirm", title="Confirm")], + inputs=[ParagraphInputConfig(output_variable_name="answer")], + actions=[UserActionConfig(id="confirm", title="Confirm")], resolved_default_values={"answer": "yes"}, expiration_time=1_700_000_000, ) @@ -27,6 +26,7 @@ def test_human_input_content_defaults_and_domain_alias() -> None: rendered_content="Please confirm", action_id="confirm", action_text="Confirm", + submitted_data={"answer": "yes"}, ) # Act @@ -42,4 +42,5 @@ def test_human_input_content_defaults_and_domain_alias() -> None: assert content.type == ExecutionContentType.HUMAN_INPUT assert content.form_definition is form_definition assert content.form_submission_data is submission_data + assert content.form_submission_data.submitted_data == {"answer": "yes"} assert ExecutionExtraContentDomainModel is HumanInputContent diff --git a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py index 399b531205..9c1cbe82a0 100644 --- a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py +++ b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py @@ -1,11 +1,12 @@ import httpx import pytest +from pytest_mock import MockerFixture from core.extension.api_based_extension_requestor import APIBasedExtensionRequestor from models.api_based_extension import APIBasedExtensionPoint -def test_request_success(mocker): +def test_request_success(mocker: MockerFixture): # Mock httpx.Client and its context manager mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value @@ -28,7 +29,7 @@ def 
test_request_success(mocker): ) -def test_request_with_ssrf_proxy(mocker): +def test_request_with_ssrf_proxy(mocker: MockerFixture): # Mock dify_config mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", "https://proxy:8081") @@ -59,7 +60,7 @@ def test_request_with_ssrf_proxy(mocker): assert mock_transport.call_count == 2 -def test_request_with_only_one_proxy_config(mocker): +def test_request_with_only_one_proxy_config(mocker: MockerFixture): # Mock dify_config with only one proxy mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", None) @@ -84,7 +85,7 @@ def test_request_with_only_one_proxy_config(mocker): assert kwargs.get("mounts") is None -def test_request_timeout(mocker): +def test_request_timeout(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -95,7 +96,7 @@ def test_request_timeout(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_connection_error(mocker): +def test_request_connection_error(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -106,7 +107,7 @@ def test_request_connection_error(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code(mocker): +def test_request_error_status_code(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -121,7 +122,7 @@ def test_request_error_status_code(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code_long_content(mocker): +def test_request_error_status_code_long_content(mocker: 
MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) diff --git a/api/tests/unit_tests/core/external_data_tool/test_external_data_fetch.py b/api/tests/unit_tests/core/external_data_tool/test_external_data_fetch.py index 86b461cf04..c1c1291281 100644 --- a/api/tests/unit_tests/core/external_data_tool/test_external_data_fetch.py +++ b/api/tests/unit_tests/core/external_data_tool/test_external_data_fetch.py @@ -13,7 +13,7 @@ class TestExternalDataFetch: app = Flask(__name__) return app - def test_fetch_success(self, app): + def test_fetch_success(self, app: Flask): with app.app_context(): fetcher = ExternalDataFetch() @@ -79,7 +79,7 @@ class TestExternalDataFetch: assert result_inputs == inputs assert result_inputs is not inputs # Should be a copy - def test_fetch_with_none_variable(self, app): + def test_fetch_with_none_variable(self, app: Flask): with app.app_context(): fetcher = ExternalDataFetch() tool = ExternalDataVariableEntity(variable="var1", type="type1", config={}) @@ -95,7 +95,7 @@ class TestExternalDataFetch: assert "var1" not in result_inputs assert result_inputs == {"in": "val"} - def test_query_external_data_tool(self, app): + def test_query_external_data_tool(self, app: Flask): fetcher = ExternalDataFetch() tool = ExternalDataVariableEntity(variable="var1", type="type1", config={"k": "v"}) diff --git a/api/tests/unit_tests/core/helper/test_creators.py b/api/tests/unit_tests/core/helper/test_creators.py index df67d3f513..8750f6d907 100644 --- a/api/tests/unit_tests/core/helper/test_creators.py +++ b/api/tests/unit_tests/core/helper/test_creators.py @@ -8,7 +8,7 @@ from yarl import URL @pytest.fixture(autouse=True) -def _patch_creators_url(monkeypatch): +def _patch_creators_url(monkeypatch: pytest.MonkeyPatch): """Patch the module-level creators_platform_api_url for all tests.""" monkeypatch.setattr( 
"core.helper.creators.creators_platform_api_url", diff --git a/api/tests/unit_tests/core/ops/test_base_trace_instance.py b/api/tests/unit_tests/core/ops/test_base_trace_instance.py index ac65d13454..15a2af17ca 100644 --- a/api/tests/unit_tests/core/ops/test_base_trace_instance.py +++ b/api/tests/unit_tests/core/ops/test_base_trace_instance.py @@ -18,7 +18,7 @@ class ConcreteTraceInstance(BaseTraceInstance): @pytest.fixture -def mock_db_session(monkeypatch): +def mock_db_session(monkeypatch: pytest.MonkeyPatch): mock_session = MagicMock(spec=Session) mock_session.__enter__.return_value = mock_session mock_session.__exit__.return_value = None diff --git a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py index e47df0121e..33a3293682 100644 --- a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py +++ b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py @@ -203,7 +203,7 @@ class DummySessionContext: @pytest.fixture(autouse=True) -def patch_provider_map(monkeypatch): +def patch_provider_map(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({"dummy": FAKE_PROVIDER_ENTRY}) ) @@ -212,7 +212,7 @@ def patch_provider_map(monkeypatch): @pytest.fixture(autouse=True) -def patch_timer_and_current_app(monkeypatch): +def patch_timer_and_current_app(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.ops.ops_trace_manager.threading.Timer", DummyTimer) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_queue", queue.Queue()) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_timer", None) @@ -227,12 +227,12 @@ def patch_timer_and_current_app(monkeypatch): @pytest.fixture(autouse=True) -def patch_sqlalchemy_session(monkeypatch): +def patch_sqlalchemy_session(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.ops.ops_trace_manager.Session", DummySessionContext) @pytest.fixture -def 
encryption_mocks(monkeypatch): +def encryption_mocks(monkeypatch: pytest.MonkeyPatch): encrypt_mock = MagicMock(side_effect=lambda tenant, value: f"enc-{value}") batch_decrypt_mock = MagicMock(side_effect=lambda tenant, values: [f"dec-{value}" for value in values]) obfuscate_mock = MagicMock(side_effect=lambda value: f"ob-{value}") @@ -243,7 +243,7 @@ def encryption_mocks(monkeypatch): @pytest.fixture -def mock_db(monkeypatch): +def mock_db(monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.scalars.return_value.all.return_value = ["chat"] db_mock = MagicMock() @@ -254,7 +254,7 @@ def mock_db(monkeypatch): @pytest.fixture -def workflow_repo_fixture(monkeypatch): +def workflow_repo_fixture(monkeypatch: pytest.MonkeyPatch): repo = MagicMock() repo.get_workflow_run_by_id_without_tenant.return_value = make_workflow_run() monkeypatch.setattr(TraceTask, "_get_workflow_run_repo", classmethod(lambda cls: repo)) @@ -340,13 +340,13 @@ def test_get_ops_trace_instance_handles_none_app(mock_db): assert OpsTraceManager.get_ops_trace_instance("app-id") is None -def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch): +def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": False})) mock_db.get.return_value = app assert OpsTraceManager.get_ops_trace_instance("app-id") is None -def test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch): +def test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": True, "tracing_provider": "missing"})) mock_db.get.return_value = app monkeypatch.setattr("core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({})) @@ -388,7 +388,7 @@ def test_get_app_config_through_message_id_app_model_config(mock_db): assert result.id == "cfg" -def 
test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch): +def test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): mock_db.get.return_value = None with pytest.raises(ValueError, match="Invalid tracing provider"): OpsTraceManager.update_app_tracing_config("app", True, "bad") @@ -407,21 +407,21 @@ def test_update_app_tracing_config_success(mock_db): def test_get_app_tracing_config_errors_when_missing(mock_db): mock_db.get.return_value = None with pytest.raises(ValueError, match="App not found"): - OpsTraceManager.get_app_tracing_config("app") + OpsTraceManager.get_app_tracing_config("app", mock_db) def test_get_app_tracing_config_returns_defaults(mock_db): mock_db.get.return_value = SimpleNamespace(tracing=None) - assert OpsTraceManager.get_app_tracing_config("app-id") == {"enabled": False, "tracing_provider": None} + assert OpsTraceManager.get_app_tracing_config("app-id", mock_db) == {"enabled": False, "tracing_provider": None} def test_get_app_tracing_config_returns_payload(mock_db): payload = {"enabled": True, "tracing_provider": "dummy"} mock_db.get.return_value = SimpleNamespace(tracing=json.dumps(payload)) - assert OpsTraceManager.get_app_tracing_config("app-id") == payload + assert OpsTraceManager.get_app_tracing_config("app-id", mock_db) == payload -def test_check_and_project_helpers(monkeypatch): +def test_check_and_project_helpers(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.provider_config_map", FakeProviderMap( @@ -449,7 +449,7 @@ def test_check_and_project_helpers(monkeypatch): assert OpsTraceManager.get_trace_config_project_url({}, "dummy") == "url" -def test_trace_task_conversation_and_extract(monkeypatch): +def test_trace_task_conversation_and_extract(monkeypatch: pytest.MonkeyPatch): task = TraceTask(trace_type=TraceTaskName.CONVERSATION_TRACE, message_id="msg") assert task.conversation_trace(foo="bar") == {"foo": "bar"} assert 
task._extract_streaming_metrics(make_message_data(message_metadata="not json")) == {} @@ -525,7 +525,7 @@ def test_extract_streaming_metrics_invalid_json(): assert task._extract_streaming_metrics(fake_message) == {} -def test_trace_queue_manager_add_and_collect(monkeypatch): +def test_trace_queue_manager_add_and_collect(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -536,7 +536,7 @@ def test_trace_queue_manager_add_and_collect(monkeypatch): assert tasks == [task] -def test_trace_queue_manager_run_invokes_send(monkeypatch): +def test_trace_queue_manager_run_invokes_send(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -556,7 +556,7 @@ def test_trace_queue_manager_run_invokes_send(monkeypatch): assert called["tasks"] == [task] -def test_trace_queue_manager_send_to_celery(monkeypatch): +def test_trace_queue_manager_send_to_celery(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) diff --git a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py index a4903054e0..13cf01651e 100644 --- a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py +++ b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py @@ -19,7 +19,7 @@ import pytest @pytest.fixture -def trace_queue_manager_and_task(monkeypatch): +def trace_queue_manager_and_task(monkeypatch: pytest.MonkeyPatch): """Fixture to provide TraceQueueManager and TraceTask with delayed imports.""" module_name = "core.ops.ops_trace_manager" if module_name not in sys.modules: diff --git a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py index 
1537ffacf5..d8843f0eeb 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.request import PluginInvokeContext from core.plugin.impl.agent import PluginAgentClient @@ -15,7 +17,7 @@ def _agent_provider(name: str = "agent") -> SimpleNamespace: class TestPluginAgentClient: - def test_fetch_agent_strategy_providers(self, mocker): + def test_fetch_agent_strategy_providers(self, mocker: MockerFixture): client = PluginAgentClient() provider = _agent_provider("remote") @@ -43,7 +45,7 @@ class TestPluginAgentClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.strategies[0].identity.provider == "org/plugin/remote" - def test_fetch_agent_strategy_provider(self, mocker): + def test_fetch_agent_strategy_provider(self, mocker: MockerFixture): client = PluginAgentClient() provider = _agent_provider("provider") @@ -63,7 +65,7 @@ class TestPluginAgentClient: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.strategies[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks_and_passes_context(self, mocker): + def test_invoke_merges_chunks_and_passes_context(self, mocker: MockerFixture): client = PluginAgentClient() stream_mock = mocker.patch.object( client, "_request_with_plugin_daemon_response_stream", return_value=iter(["raw"]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py index 5f564062d5..c2cce5d691 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py @@ -1,12 +1,13 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.plugin.impl.asset import 
PluginAssetManager class TestPluginAssetManager: - def test_fetch_asset_success(self, mocker): + def test_fetch_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"asset-bytes") request_mock = mocker.patch.object(manager, "_request", return_value=response) @@ -16,14 +17,14 @@ class TestPluginAssetManager: assert result == b"asset-bytes" request_mock.assert_called_once_with(method="GET", path="plugin/tenant-1/asset/asset-1") - def test_fetch_asset_not_found_raises(self, mocker): + def test_fetch_asset_not_found_raises(self, mocker: MockerFixture): manager = PluginAssetManager() mocker.patch.object(manager, "_request", return_value=MagicMock(status_code=404, content=b"")) with pytest.raises(ValueError, match="can not found asset asset-1"): manager.fetch_asset("tenant-1", "asset-1") - def test_extract_asset_success(self, mocker): + def test_extract_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"file-content") request_mock = mocker.patch.object(manager, "_request", return_value=response) @@ -37,7 +38,7 @@ class TestPluginAssetManager: params={"plugin_unique_identifier": "org/plugin:1", "file_path": "README.md"}, ) - def test_extract_asset_not_found_raises(self, mocker): + def test_extract_asset_not_found_raises(self, mocker: MockerFixture): manager = PluginAssetManager() mocker.patch.object(manager, "_request", return_value=MagicMock(status_code=404, content=b"")) diff --git a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py index 23894bd417..b154f056ca 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import MockerFixture from core.plugin.endpoint.exc import EndpointSetupFailedError from 
core.plugin.entities.plugin_daemon import PluginDaemonInnerError @@ -39,7 +40,7 @@ class _StreamContext: class TestBasePluginClientImpl: - def test_inject_trace_headers(self, mocker): + def test_inject_trace_headers(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch("core.plugin.impl.base.dify_config.ENABLE_OTEL", True) trace_header = "00-abc-xyz-01" @@ -54,7 +55,7 @@ class TestBasePluginClientImpl: client._inject_trace_headers(headers_with_existing) assert headers_with_existing["TraceParent"] == "exists" - def test_stream_request_handles_data_lines_and_dict_payload(self, mocker): + def test_stream_request_handles_data_lines_and_dict_payload(self, mocker: MockerFixture): client = BasePluginClient() stream_mock = mocker.patch( "httpx.Client.stream", @@ -66,14 +67,14 @@ class TestBasePluginClientImpl: assert result == ["hello", "world"] assert stream_mock.call_args.kwargs["data"] == {"k": "v"} - def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker): + def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_request", side_effect=RuntimeError("boom")) with pytest.raises(ValueError, match="Failed to request plugin daemon"): client._request_with_plugin_daemon_response("GET", "plugin/tenant/path", bool) - def test_request_with_plugin_daemon_response_applies_transformer(self, mocker): + def test_request_with_plugin_daemon_response_applies_transformer(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_request", return_value=_ResponseStub({"code": 0, "message": "", "data": True})) @@ -88,14 +89,14 @@ class TestBasePluginClientImpl: assert result is True assert transformed == {"code": 0, "message": "", "data": True} - def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, 
mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"error":"bad-line"}'])) with pytest.raises(ValueError, match="bad-line"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object( client, "_stream_request", return_value=iter(['{"code":-500,"message":"not-json","data":null}']) @@ -105,14 +106,14 @@ class TestBasePluginClientImpl: list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) assert exc_info.value.message == "not-json" - def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"code":-1,"message":"err","data":null}'])) with pytest.raises(ValueError, match="plugin daemon: err, code: -1"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"code":0,"message":"","data":null}'])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py index 4c5987d759..94723dcfe2 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import 
MockerFixture + from core.datasource.entities.datasource_entities import ( GetOnlineDocumentPageContentRequest, OnlineDriveBrowseFilesRequest, @@ -19,7 +21,7 @@ def _datasource_provider(name: str = "provider") -> SimpleNamespace: class TestPluginDatasourceManager: - def test_fetch_datasource_providers(self, mocker): + def test_fetch_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ -52,7 +54,7 @@ class TestPluginDatasourceManager: assert result[1].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_installed_datasource_providers(self, mocker): + def test_fetch_installed_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ -83,7 +85,7 @@ class TestPluginDatasourceManager: assert result[0].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_datasource_provider_local_and_remote(self, mocker): + def test_fetch_datasource_provider_local_and_remote(self, mocker: MockerFixture): manager = PluginDatasourceManager() local = manager.fetch_datasource_provider("tenant-1", "langgenius/file/file") @@ -113,7 +115,7 @@ class TestPluginDatasourceManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.datasources[0].identity.provider == "org/plugin/provider" - def test_get_website_crawl_streaming(self, mocker): + def test_get_website_crawl_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, 
"_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["crawl"]) @@ -132,7 +134,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_get_online_document_pages_streaming(self, mocker): + def test_get_online_document_pages_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["pages"]) @@ -151,7 +153,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_get_online_document_page_content_streaming(self, mocker): + def test_get_online_document_page_content_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["content"]) @@ -170,7 +172,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_online_drive_browse_files_streaming(self, mocker): + def test_online_drive_browse_files_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["browse"]) @@ -189,7 +191,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_online_drive_download_file_streaming(self, mocker): + def test_online_drive_download_file_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["download"]) @@ -208,14 +210,14 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker): + def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker: 
MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([SimpleNamespace(result=True)]) assert manager.validate_provider_credentials("tenant-1", "user-1", "provider", "org/plugin", {"k": "v"}) is True - def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker): + def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py index c80785aee0..05959207b1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py @@ -1,10 +1,12 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.impl.debugging import PluginDebuggingClient class TestPluginDebuggingClient: - def test_get_debugging_key(self, mocker): + def test_get_debugging_key(self, mocker: MockerFixture): client = PluginDebuggingClient() request_mock = mocker.patch.object( client, diff --git a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py index 4cf657a050..7a24cc01d1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py @@ -1,11 +1,12 @@ import pytest +from pytest_mock import MockerFixture from core.plugin.impl.endpoint import PluginEndpointClient from core.plugin.impl.exc import PluginDaemonInternalServerError class TestPluginEndpointClientImpl: - def test_create_endpoint(self, mocker): + def test_create_endpoint(self, mocker: 
MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -18,7 +19,7 @@ class TestPluginEndpointClientImpl: assert args[:3] == ("POST", "plugin/tenant-1/endpoint/setup", bool) assert kwargs["data"]["plugin_unique_identifier"] == "org/plugin:1" - def test_list_endpoints(self, mocker): + def test_list_endpoints(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -28,7 +29,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list" assert request_mock.call_args.kwargs["params"] == {"page": 2, "page_size": 20} - def test_list_endpoints_for_single_plugin(self, mocker): + def test_list_endpoints_for_single_plugin(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -38,7 +39,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list/plugin" assert request_mock.call_args.kwargs["params"] == {"plugin_id": "org/plugin", "page": 1, "page_size": 10} - def test_update_endpoint(self, mocker): + def test_update_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -47,7 +48,7 @@ class TestPluginEndpointClientImpl: assert result is True assert request_mock.call_args.args[:3] == ("POST", "plugin/tenant-1/endpoint/update", bool) - def test_enable_and_disable_endpoint(self, mocker): + def test_enable_and_disable_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -58,7 +59,7 @@ class 
TestPluginEndpointClientImpl: assert calls[0].args[1] == "plugin/tenant-1/endpoint/enable" assert calls[1].args[1] == "plugin/tenant-1/endpoint/disable" - def test_delete_endpoint_idempotent_and_re_raise(self, mocker): + def test_delete_endpoint_idempotent_and_re_raise(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response") diff --git a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py index 8c6f1c6b7f..d99a8c114f 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py @@ -1,11 +1,13 @@ import json +from pytest_mock import MockerFixture + from core.plugin.impl import exc as exc_module from core.plugin.impl.exc import PluginDaemonError, PluginInvokeError class TestPluginImplExceptions: - def test_plugin_daemon_error_str_contains_request_id(self, mocker): + def test_plugin_daemon_error_str_contains_request_id(self, mocker: MockerFixture): mocker.patch("core.plugin.impl.exc.get_request_id", return_value="req-123") error = PluginDaemonError("bad") @@ -21,7 +23,7 @@ class TestPluginImplExceptions: assert "RateLimit" in friendly assert "too many" in friendly - def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker): + def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker: MockerFixture): err = PluginInvokeError("plain text") assert err._get_error_object() == {} @@ -32,7 +34,7 @@ class TestPluginImplExceptions: err2 = PluginInvokeError("plain text") assert err2.get_error_message() == "plain text" - def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker): + def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker: MockerFixture): adapter = mocker.patch.object(exc_module, "TypeAdapter") adapter.return_value.validate_json.side_effect = RuntimeError("invalid") diff --git 
a/api/tests/unit_tests/core/plugin/impl/test_model_client.py b/api/tests/unit_tests/core/plugin/impl/test_model_client.py index bcbebbb38b..6dc572310c 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_model_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_model_client.py @@ -4,13 +4,14 @@ import io from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.plugin.entities.plugin_daemon import PluginDaemonInnerError from core.plugin.impl.model import PluginModelClient class TestPluginModelClient: - def test_fetch_model_providers(self, mocker): + def test_fetch_model_providers(self, mocker: MockerFixture): client = PluginModelClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["provider-a"]) @@ -23,7 +24,7 @@ class TestPluginModelClient: ) assert request_mock.call_args.kwargs["params"] == {"page": 1, "page_size": 256} - def test_get_model_schema(self, mocker): + def test_get_model_schema(self, mocker: MockerFixture): client = PluginModelClient() schema = SimpleNamespace(name="schema") stream_mock = mocker.patch.object( @@ -45,7 +46,7 @@ class TestPluginModelClient: assert result is schema assert stream_mock.call_args.args[:2] == ("POST", "plugin/tenant-1/dispatch/model/schema") - def test_get_model_schema_empty_stream_returns_none(self, mocker): + def test_get_model_schema_empty_stream_returns_none(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -53,7 +54,7 @@ class TestPluginModelClient: assert result is None - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -77,7 +78,7 @@ class TestPluginModelClient: "plugin/tenant-1/dispatch/model/validate_provider_credentials", ) - def 
test_validate_provider_credentials_without_dict_update(self, mocker): + def test_validate_provider_credentials_without_dict_update(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -91,13 +92,13 @@ class TestPluginModelClient: assert result is False assert credentials == {"api_key": "same"} - def test_validate_provider_credentials_empty_returns_false(self, mocker): + def test_validate_provider_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.validate_provider_credentials("tenant-1", "user-1", "org/plugin:1", "provider-a", {}) is False - def test_validate_model_credentials(self, mocker): + def test_validate_model_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -123,7 +124,7 @@ class TestPluginModelClient: "plugin/tenant-1/dispatch/model/validate_model_credentials", ) - def test_validate_model_credentials_empty_returns_false(self, mocker): + def test_validate_model_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -132,7 +133,7 @@ class TestPluginModelClient: is False ) - def test_invoke_llm(self, mocker): + def test_invoke_llm(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk-1"]) @@ -160,7 +161,7 @@ class TestPluginModelClient: assert call_kwargs["data"]["data"]["stream"] is False assert call_kwargs["data"]["data"]["model_parameters"] == {"temperature": 0.1} - def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker: MockerFixture): client = PluginModelClient() def 
_boom(): @@ -182,7 +183,7 @@ class TestPluginModelClient: ) ) - def test_get_llm_num_tokens(self, mocker): + def test_get_llm_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -204,7 +205,7 @@ class TestPluginModelClient: assert result == 42 - def test_get_llm_num_tokens_empty_returns_zero(self, mocker): + def test_get_llm_num_tokens_empty_returns_zero(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -213,7 +214,7 @@ class TestPluginModelClient: == 0 ) - def test_invoke_text_embedding(self, mocker): + def test_invoke_text_embedding(self, mocker: MockerFixture): client = PluginModelClient() embedding_result = SimpleNamespace(data=[[0.1, 0.2]]) mocker.patch.object( @@ -233,7 +234,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_text_embedding_empty_raises(self, mocker): + def test_invoke_text_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -242,7 +243,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "embedding-a", {}, ["hello"], "x" ) - def test_invoke_multimodal_embedding(self, mocker): + def test_invoke_multimodal_embedding(self, mocker: MockerFixture): client = PluginModelClient() embedding_result = SimpleNamespace(data=[[0.3, 0.4]]) mocker.patch.object( @@ -262,7 +263,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_multimodal_embedding_empty_raises(self, mocker): + def test_invoke_multimodal_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -271,7 +272,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", 
"provider-a", "embedding-a", {}, [{"type": "image"}], "x" ) - def test_get_text_embedding_num_tokens(self, mocker): + def test_get_text_embedding_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -287,7 +288,7 @@ class TestPluginModelClient: 3, ] - def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker): + def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -298,7 +299,7 @@ class TestPluginModelClient: == [] ) - def test_invoke_rerank(self, mocker): + def test_invoke_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.9]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -318,14 +319,14 @@ class TestPluginModelClient: assert result is rerank_result - def test_invoke_rerank_empty_raises(self, mocker): + def test_invoke_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) with pytest.raises(ValueError, match="Failed to invoke rerank"): client.invoke_rerank("tenant-1", "user-1", "org/plugin:1", "provider-a", "rerank-a", {}, "q", ["doc-1"]) - def test_invoke_multimodal_rerank(self, mocker): + def test_invoke_multimodal_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.8]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -345,7 +346,7 @@ class TestPluginModelClient: assert result is rerank_result - def test_invoke_multimodal_rerank_empty_raises(self, mocker): + def test_invoke_multimodal_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() 
mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -361,7 +362,7 @@ class TestPluginModelClient: [{"type": "image"}], ) - def test_invoke_tts(self, mocker): + def test_invoke_tts(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -384,7 +385,7 @@ class TestPluginModelClient: assert result == [b"hello", b"!"] - def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker: MockerFixture): client = PluginModelClient() def _boom(): @@ -396,7 +397,7 @@ class TestPluginModelClient: with pytest.raises(ValueError, match="tts error-400"): list(client.invoke_tts("tenant-1", "user-1", "org/plugin:1", "provider-a", "tts-a", {}, "hello", "alloy")) - def test_get_tts_model_voices(self, mocker): + def test_get_tts_model_voices(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -425,13 +426,13 @@ class TestPluginModelClient: assert result == [{"name": "Alloy", "value": "alloy"}, {"name": "Echo", "value": "echo"}] - def test_get_tts_model_voices_empty_returns_list(self, mocker): + def test_get_tts_model_voices_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.get_tts_model_voices("tenant-1", "user-1", "org/plugin:1", "provider-a", "tts-a", {}) == [] - def test_invoke_speech_to_text(self, mocker): + def test_invoke_speech_to_text(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -452,7 +453,7 @@ class TestPluginModelClient: assert result == "transcribed text" assert stream_mock.call_args.kwargs["data"]["data"]["file"] == "616263" - def test_invoke_speech_to_text_empty_raises(self, mocker): + def test_invoke_speech_to_text_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() 
mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -461,7 +462,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "stt-a", {}, io.BytesIO(b"abc") ) - def test_invoke_moderation(self, mocker): + def test_invoke_moderation(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -482,7 +483,7 @@ class TestPluginModelClient: assert result is True assert stream_mock.call_args.kwargs["path"] == "plugin/tenant-1/dispatch/moderation/invoke" - def test_invoke_moderation_empty_raises(self, mocker): + def test_invoke_moderation_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py index 6fb4c99432..f6c9b1c669 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py +++ b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.impl.oauth import OAuthHandler @@ -25,7 +26,7 @@ def _build_request(body: bytes = b"payload") -> Request: class TestOAuthHandler: - def test_get_authorization_url(self, mocker): + def test_get_authorization_url(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -45,7 +46,7 @@ class TestOAuthHandler: assert response.authorization_url == "https://auth.example.com" assert stream_mock.call_count == 1 - def test_get_authorization_url_no_response_raises(self, mocker): + def test_get_authorization_url_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, 
"_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -59,7 +60,7 @@ class TestOAuthHandler: system_credentials={}, ) - def test_get_credentials(self, mocker): + def test_get_credentials(self, mocker: MockerFixture): handler = OAuthHandler() captured_data = {} @@ -85,7 +86,7 @@ class TestOAuthHandler: assert "raw_http_request" in captured_data["data"] assert stream_mock.call_count == 1 - def test_get_credentials_no_response_raises(self, mocker): + def test_get_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -100,7 +101,7 @@ class TestOAuthHandler: request=_build_request(), ) - def test_refresh_credentials(self, mocker): + def test_refresh_credentials(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -121,7 +122,7 @@ class TestOAuthHandler: assert response.credentials == {"token": "new"} assert stream_mock.call_count == 1 - def test_refresh_credentials_no_response_raises(self, mocker): + def test_refresh_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py index 80cf46f9bb..3ae3cc18e4 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.plugin_daemon import CredentialType from core.plugin.impl.tool import PluginToolManager @@ -15,7 +17,7 @@ def _tool_provider(name: str = "provider") -> SimpleNamespace: class TestPluginToolManager: - def test_fetch_tool_providers(self, mocker): + def 
test_fetch_tool_providers(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("remote") mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", return_value={"resolved": True}) @@ -44,7 +46,7 @@ class TestPluginToolManager: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.tools[0].identity.provider == "org/plugin/remote" - def test_fetch_tool_provider(self, mocker): + def test_fetch_tool_provider(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("provider") mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", return_value={"resolved": True}) @@ -68,7 +70,7 @@ class TestPluginToolManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.tools[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks(self, mocker): + def test_invoke_merges_chunks(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object( manager, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk"]) @@ -92,7 +94,7 @@ class TestPluginToolManager: assert merge_mock.call_count == 1 assert stream_mock.call_args.kwargs["headers"]["X-Plugin-ID"] == "org/plugin" - def test_validate_credentials_paths(self, mocker): + def test_validate_credentials_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") @@ -108,7 +110,7 @@ class TestPluginToolManager: stream_mock.return_value = iter([]) assert manager.validate_datasource_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) is False - def test_get_runtime_parameters_paths(self, mocker): + def test_get_runtime_parameters_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") diff --git 
a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py index 76da51c2c8..811bb7e50d 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.entities.plugin_daemon import CredentialType @@ -62,7 +63,7 @@ def _subscription_call_kwargs(method_name: str) -> dict: class TestPluginTriggerClient: - def test_fetch_trigger_providers(self, mocker): + def test_fetch_trigger_providers(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("remote") @@ -89,7 +90,7 @@ class TestPluginTriggerClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.events[0].identity.provider == "org/plugin/remote" - def test_fetch_trigger_provider(self, mocker): + def test_fetch_trigger_provider(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("provider") @@ -108,7 +109,7 @@ class TestPluginTriggerClient: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.events[0].identity.provider == "org/plugin/provider" - def test_invoke_trigger_event(self, mocker): + def test_invoke_trigger_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -132,7 +133,7 @@ class TestPluginTriggerClient: assert result.variables == {"ok": True} assert stream_mock.call_count == 1 - def test_invoke_trigger_event_no_response_raises(self, mocker): + def test_invoke_trigger_event_no_response_raises(self, mocker: MockerFixture): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -150,7 +151,7 @@ class 
TestPluginTriggerClient: payload={"payload": 1}, ) - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response_stream") @@ -163,7 +164,7 @@ class TestPluginTriggerClient: ): client.validate_provider_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) - def test_dispatch_event(self, mocker): + def test_dispatch_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -195,7 +196,7 @@ class TestPluginTriggerClient: ) @pytest.mark.parametrize("method_name", ["subscribe", "unsubscribe", "refresh"]) - def test_subscription_operations_success(self, mocker, method_name): + def test_subscription_operations_success(self, mocker: MockerFixture, method_name): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -217,7 +218,7 @@ class TestPluginTriggerClient: ("refresh", "No response received from plugin daemon for refresh"), ], ) - def test_subscription_operations_no_response(self, mocker, method_name, expected): + def test_subscription_operations_no_response(self, mocker: MockerFixture, method_name, expected): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) method = getattr(client, method_name) diff --git a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py index 3feb4159ad..2ed7c70ed9 100644 --- a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py +++ b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import BaseModel +from pytest_mock import MockerFixture from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig 
from core.plugin.backwards_invocation.app import PluginAppBackwardsInvocation @@ -41,7 +42,7 @@ class TestBaseBackwardsInvocation: class TestPluginAppBackwardsInvocation: - def test_fetch_app_info_workflow_path(self, mocker): + def test_fetch_app_info_workflow_path(self, mocker: MockerFixture): workflow = MagicMock() workflow.features_dict = {"feature": "v"} workflow.user_input_form.return_value = [{"name": "foo"}] @@ -57,7 +58,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"data": {"mapped": True}} mapper.assert_called_once_with(features_dict={"feature": "v"}, user_input_form=[{"name": "foo"}]) - def test_fetch_app_info_model_config_path(self, mocker): + def test_fetch_app_info_model_config_path(self, mocker: MockerFixture): model_config = MagicMock() model_config.to_dict.return_value = {"user_input_form": [{"name": "bar"}], "k": "v"} app = MagicMock(mode=AppMode.COMPLETION, app_model_config=model_config) @@ -81,7 +82,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.COMPLETION, "invoke_completion_app"), ], ) - def test_invoke_app_routes_by_mode(self, mocker, mode, route_method): + def test_invoke_app_routes_by_mode(self, mocker: MockerFixture, mode, route_method): app = MagicMock(mode=mode) user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -102,7 +103,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"routed": True} assert route.call_count == 1 - def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker): + def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker: MockerFixture): app = MagicMock(mode=AppMode.WORKFLOW) end_user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -127,7 +128,7 @@ class TestPluginAppBackwardsInvocation: get_or_create.assert_called_once_with(app) assert route.call_args.args[1] is end_user - def test_invoke_app_missing_query_for_chat_raises(self, mocker): + def 
test_invoke_app_missing_query_for_chat_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=MagicMock(mode=AppMode.CHAT)) mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -143,7 +144,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_app_unexpected_mode_raises(self, mocker): + def test_invoke_app_unexpected_mode_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=MagicMock(mode="other")) mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -166,7 +167,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.CHAT, "core.plugin.backwards_invocation.app.ChatAppGenerator.generate"), ], ) - def test_invoke_chat_app_agent_and_chat(self, mocker, mode, generator_path): + def test_invoke_chat_app_agent_and_chat(self, mocker: MockerFixture, mode, generator_path): app = MagicMock(mode=mode, workflow=None) spy = mocker.patch(generator_path, return_value={"result": "ok"}) @@ -183,7 +184,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"result": "ok"} assert spy.call_count == 1 - def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker): + def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -242,7 +243,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_workflow_app_injects_pause_state_config(self, mocker): + def test_invoke_workflow_app_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -284,7 +285,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_completion_app(self, mocker): + def test_invoke_completion_app(self, mocker: MockerFixture): spy = mocker.patch( 
"core.plugin.backwards_invocation.app.CompletionAppGenerator.generate", return_value={"ok": 1} ) @@ -295,7 +296,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"ok": 1} assert spy.call_count == 1 - def test_get_user_returns_end_user(self, mocker): + def test_get_user_returns_end_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [MagicMock(id="end-user")] session_ctx = MagicMock() @@ -307,7 +308,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "end-user" - def test_get_user_falls_back_to_account_user(self, mocker): + def test_get_user_falls_back_to_account_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, MagicMock(id="account-user")] session_ctx = MagicMock() @@ -319,7 +320,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "account-user" - def test_get_user_raises_when_user_not_found(self, mocker): + def test_get_user_raises_when_user_not_found(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, None] session_ctx = MagicMock() @@ -331,21 +332,21 @@ class TestPluginAppBackwardsInvocation: with pytest.raises(ValueError, match="user not found"): PluginAppBackwardsInvocation._get_user("uid") - def test_get_app_returns_app(self, mocker): + def test_get_app_returns_app(self, mocker: MockerFixture): app_obj = MagicMock(id="app") db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=app_obj))) mocker.patch("core.plugin.backwards_invocation.app.db", db) assert PluginAppBackwardsInvocation._get_app("app", "tenant") is app_obj - def test_get_app_raises_when_missing(self, mocker): + def test_get_app_raises_when_missing(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=None))) mocker.patch("core.plugin.backwards_invocation.app.db", db) with 
pytest.raises(ValueError, match="app not found"): PluginAppBackwardsInvocation._get_app("app", "tenant") - def test_get_app_raises_when_query_fails(self, mocker): + def test_get_app_raises_when_query_fails(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(side_effect=RuntimeError("db down")))) mocker.patch("core.plugin.backwards_invocation.app.db", db) diff --git a/api/tests/unit_tests/core/plugin/test_plugin_entities.py b/api/tests/unit_tests/core/plugin/test_plugin_entities.py index f1c4c7e700..deac0ba1da 100644 --- a/api/tests/unit_tests/core/plugin/test_plugin_entities.py +++ b/api/tests/unit_tests/core/plugin/test_plugin_entities.py @@ -5,6 +5,7 @@ from enum import StrEnum import pytest from flask import Response from pydantic import ValidationError +from pytest_mock import MockerFixture from core.plugin.entities.endpoint import EndpointEntityWithInstance from core.plugin.entities.marketplace import MarketplacePluginDeclaration, MarketplacePluginSnapshot @@ -34,7 +35,7 @@ from graphon.model_runtime.entities.message_entities import ( class TestEndpointEntity: - def test_endpoint_entity_with_instance_renders_url(self, mocker): + def test_endpoint_entity_with_instance_renders_url(self, mocker: MockerFixture): mocker.patch("core.plugin.entities.endpoint.dify_config.ENDPOINT_URL_TEMPLATE", "https://dify.test/{hook_id}") now = datetime.datetime.now(datetime.UTC) diff --git a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py index 1b114b369a..1f46634b89 100644 --- a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py +++ b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py @@ -1,5 +1,7 @@ from uuid import uuid4 +from pytest_mock import MockerFixture + from constants import UUID_NIL from core.prompt.utils.extract_thread_messages import extract_thread_messages from core.prompt.utils.get_thread_messages_length import 
get_thread_messages_length @@ -103,7 +105,7 @@ def test_extract_thread_messages_breaks_when_parent_is_none(): assert result[0].id == id2 -def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): +def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer=""), # newest generated message should be excluded @@ -119,7 +121,7 @@ def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): mock_scalars.assert_called_once() -def test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker): +def test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer="latest-answer"), diff --git a/api/tests/unit_tests/core/prompt/test_prompt_transform.py b/api/tests/unit_tests/core/prompt/test_prompt_transform.py index 5308c8e7b3..3d71e73496 100644 --- a/api/tests/unit_tests/core/prompt/test_prompt_transform.py +++ b/api/tests/unit_tests/core/prompt/test_prompt_transform.py @@ -209,7 +209,7 @@ class TestPromptTransform: assert result == ["only"] memory.get_history_prompt_messages.assert_called_with(max_token_limit=10, message_limit=None) - def test_append_chat_histories_extends_prompt_messages(self, monkeypatch): + def test_append_chat_histories_extends_prompt_messages(self, monkeypatch: pytest.MonkeyPatch): transform = PromptTransform() memory = MagicMock() memory_config = SimpleNamespace(window=SimpleNamespace(enabled=False, size=None)) diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py index 1e91c2dd88..e233bd2ef0 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py @@ -67,7 +67,7 @@ def 
_dataset(dataset_keyword_table=None, keyword_number=None): @pytest.fixture -def patched_runtime(monkeypatch): +def patched_runtime(monkeypatch: pytest.MonkeyPatch): session = MagicMock() db = SimpleNamespace(session=session) storage = MagicMock() @@ -151,7 +151,7 @@ def test_add_texts_without_keywords_list_always_uses_extractor(monkeypatch, patc assert set(keyword._update_segment_keywords.call_args.args[2]) == {"from-extractor"} -def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch): +def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value=None)) @@ -308,7 +308,7 @@ def test_add_and_delete_ids_from_keyword_table_helpers(): assert deleted["kw2"] == {"node-2"} -def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch): +def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) handler = MagicMock() handler.extract_keywords.return_value = ["kw-a", "kw-b"] @@ -350,7 +350,7 @@ def test_update_segment_keywords_updates_when_segment_exists(monkeypatch, patche patched_runtime.session.commit.assert_not_called() -def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): +def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value={})) monkeypatch.setattr(keyword, "_update_segment_keywords", MagicMock()) @@ -365,7 +365,7 @@ def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): keyword._save_dataset_keyword_table.assert_called_once() -def test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch): +def 
test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table(), keyword_number=2)) handler = MagicMock() handler.extract_keywords.return_value = {"auto"} diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py index a4586c141b..c8ee75bf43 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py @@ -2,6 +2,8 @@ import sys import types from types import SimpleNamespace +import pytest + from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler from core.rag.datasource.keyword.jieba.stopwords import STOPWORDS @@ -38,7 +40,7 @@ def _install_fake_jieba_modules( monkeypatch.delitem(sys.modules, "jieba.analyse.tfidf", raising=False) -def test_init_uses_existing_default_tfidf(monkeypatch): +def test_init_uses_existing_default_tfidf(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") default_tfidf = _DummyTFIDF() analyse_module.default_tfidf = default_tfidf @@ -51,7 +53,7 @@ def test_init_uses_existing_default_tfidf(monkeypatch): assert handler._tfidf.stop_words == STOPWORDS -def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): +def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -67,7 +69,7 @@ def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): +def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch: 
pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -85,7 +87,7 @@ def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): +def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None _install_fake_jieba_modules(monkeypatch, analyse_module) @@ -96,7 +98,7 @@ def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): assert fallback_keywords == ["two"] -def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): +def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") _install_fake_jieba_modules(monkeypatch, analyse_module, jieba_attrs={"lcut": lambda _: ["x", "x", "y"]}) @@ -105,7 +107,7 @@ def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): assert tfidf.extract_tags("ignored", topK=1) == ["x"] -def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch): +def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") _install_fake_jieba_modules( monkeypatch, diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py index 0d969a3270..e1765b17cb 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py @@ -10,7 +10,7 @@ from core.rag.datasource.keyword.keyword_type import KeyWordType from core.rag.models.document import Document -def test_get_keyword_factory_returns_jieba_factory(monkeypatch): +def 
test_get_keyword_factory_returns_jieba_factory(monkeypatch: pytest.MonkeyPatch): fake_module = types.ModuleType("core.rag.datasource.keyword.jieba.jieba") class FakeJieba: @@ -27,7 +27,7 @@ def test_get_keyword_factory_raises_for_unsupported_type(): Keyword.get_keyword_factory("unsupported") -def test_keyword_initialization_uses_configured_factory(monkeypatch): +def test_keyword_initialization_uses_configured_factory(monkeypatch: pytest.MonkeyPatch): dataset = SimpleNamespace(id="dataset-1") fake_processor = MagicMock() diff --git a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py index b0ecad4d0c..d38213dd89 100644 --- a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py +++ b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py @@ -182,7 +182,7 @@ class TestRetrievalServiceInternals: app.app_context.return_value.__exit__.return_value = False return app - def test_retrieve_with_attachment_ids_only(self, monkeypatch, internal_dataset): + def test_retrieve_with_attachment_ids_only(self, monkeypatch: pytest.MonkeyPatch, internal_dataset): with ( patch("core.rag.datasource.retrieval_service.RetrievalService._get_dataset", return_value=internal_dataset), patch("core.rag.datasource.retrieval_service.RetrievalService._retrieve") as mock_retrieve, @@ -699,7 +699,9 @@ class TestRetrievalServiceInternals: assert RetrievalService.format_retrieval_documents(documents) == [] - def test_format_retrieval_documents_with_parent_child_summary_and_attachments(self, monkeypatch): + def test_format_retrieval_documents_with_parent_child_summary_and_attachments( + self, monkeypatch: pytest.MonkeyPatch + ): dataset_doc_parent = SimpleNamespace( id="doc-parent", doc_form=IndexStructureType.PARENT_CHILD_INDEX, @@ -877,7 +879,7 @@ class TestRetrievalServiceInternals: assert result_by_segment_id["segment-parent-summary"].summary == "summary for parent" assert 
result_by_segment_id["segment-parent-summary"].child_chunks == [] - def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch): + def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch: pytest.MonkeyPatch): rollback = Mock() monkeypatch.setattr(retrieval_service_module.db.session, "rollback", rollback) monkeypatch.setattr(retrieval_service_module.db.session, "scalars", Mock(side_effect=RuntimeError("db error"))) @@ -936,7 +938,7 @@ class TestRetrievalServiceInternals: future_ok.cancel.assert_called() def test_retrieve_internal_raises_value_error_when_exceptions_exist( - self, monkeypatch, internal_dataset, internal_flask_app + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) @@ -958,7 +960,9 @@ class TestRetrievalServiceInternals: query="query", ) - def test_retrieve_internal_hybrid_weighted_attachment_flow(self, monkeypatch, internal_dataset, internal_flask_app): + def test_retrieve_internal_hybrid_weighted_attachment_flow( + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app + ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) monkeypatch.setattr( diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py index 7b6ee97f1c..067159398d 100644 --- a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py @@ -102,7 +102,9 @@ def test_gen_index_struct_dict(vector_factory_module): ("HOLOGRES", "dify_vdb_hologres.hologres_vector", "HologresVectorFactory"), ], ) -def test_get_vector_factory_supported(vector_factory_module, monkeypatch, vector_type, module_path, 
class_name): +def test_get_vector_factory_supported( + vector_factory_module, monkeypatch: pytest.MonkeyPatch, vector_type, module_path, class_name +): expected_cls = _register_fake_factory_module(monkeypatch, module_path, class_name) result_cls = vector_factory_module.Vector.get_vector_factory(getattr(vector_factory_module.VectorType, vector_type)) @@ -119,7 +121,7 @@ class _PluginChromaFactory: """Stub used only for entry-point override test.""" -def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, monkeypatch): +def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, monkeypatch: pytest.MonkeyPatch): from importlib.metadata import EntryPoint from core.rag.datasource.vdb import vector_backend_registry as reg @@ -171,7 +173,7 @@ def test_vector_init_uses_default_and_custom_attributes(vector_factory_module): assert default_vector._vector_processor == "processor" -def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch): +def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch: pytest.MonkeyPatch): """``Vector(dataset)`` must not transitively call ``ModelManager`` during construction. The real embedding model should only be materialized on the first ``embed_*`` call (i.e. 
create / search paths) so cleanup paths @@ -214,7 +216,7 @@ def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_m inner_model.embed_documents.assert_called_once_with(["world"]) -def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch): +def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch: pytest.MonkeyPatch): calls = {"vector_type": None, "init_args": None} class _Factory: @@ -242,7 +244,7 @@ def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeyp assert calls["init_args"] == (vector._dataset, ["doc_id"], "embeddings") -def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch): +def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Expr: def __eq__(self, _other): return "expr" @@ -279,7 +281,7 @@ def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch) assert calls["vector_type"] == vector_factory_module.VectorType.TIDB_ON_QDRANT -def test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch): +def test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE", None) monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE_WHITELIST_ENABLE", False) @@ -343,7 +345,7 @@ def test_create_skips_empty_text_documents_before_embedding(vector_factory_modul vector._vector_processor.create.assert_not_called() -def test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch): +def test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Field: def in_(self, value): return value @@ -484,7 +486,7 @@ def test_vector_delegation_methods(vector_factory_module): vector._vector_processor.delete_by_metadata_field.assert_called_once_with("doc_id", 
"doc-1") -def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch): +def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch: pytest.MonkeyPatch): vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) vector._embeddings = MagicMock() vector._vector_processor = MagicMock() @@ -507,7 +509,7 @@ def test_search_by_file_handles_missing_and_existing_upload(vector_factory_modul assert payload["file_id"] == "file-2" -def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch): +def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch: pytest.MonkeyPatch): delete_mock = MagicMock() redis_delete = MagicMock() monkeypatch.setattr(vector_factory_module.redis_client, "delete", redis_delete) @@ -526,7 +528,7 @@ def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, redis_delete.assert_not_called() -def test_get_embeddings_builds_cache_embedding(vector_factory_module, monkeypatch): +def test_get_embeddings_builds_cache_embedding(vector_factory_module, monkeypatch: pytest.MonkeyPatch): model_manager = MagicMock() model_manager.get_model_instance.return_value = "model-instance" diff --git a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py index e6a06f163e..2e1c5715c2 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py @@ -39,7 +39,7 @@ class TestCSVExtractor: with pytest.raises(ValueError, match="Source column 'missing_col' not found"): extractor.extract() - def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch): + def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=False) def 
raise_decode(*args, **kwargs): @@ -50,7 +50,7 @@ class TestCSVExtractor: with pytest.raises(RuntimeError, match="Error loading dummy.csv"): extractor.extract() - def test_extract_autodetect_encoding_success(self, monkeypatch): + def test_extract_autodetect_encoding_success(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) attempted_encodings: list[str | None] = [] @@ -75,7 +75,7 @@ class TestCSVExtractor: assert docs[0].page_content == "id: source-1;body: hello" assert attempted_encodings == [None, "bad", "utf-8"] - def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch): + def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) def always_raise(*args, **kwargs): @@ -86,7 +86,7 @@ class TestCSVExtractor: assert extractor.extract() == [] - def test_read_from_file_re_raises_csv_error(self, monkeypatch): + def test_read_from_file_re_raises_csv_error(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv") monkeypatch.setattr(pd, "read_csv", lambda *args, **kwargs: (_ for _ in ()).throw(csv.Error("bad csv"))) diff --git a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py index d2bcc1e2c4..2b42adc716 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py @@ -45,7 +45,7 @@ class _FakeWorkbook: class TestExcelExtractor: - def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch): + def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch: pytest.MonkeyPatch): sheet_with_data = _FakeSheet( header_rows=[("Name", "Link")], data_rows=[ @@ -68,7 +68,7 @@ class TestExcelExtractor: assert docs[1].page_content == '"Name":"";"Link":"123"' assert 
all(doc.metadata["source"] == "/tmp/sample.xlsx" for doc in docs) - def test_extract_xls_path(self, monkeypatch): + def test_extract_xls_path(self, monkeypatch: pytest.MonkeyPatch): class FakeExcelFile: sheet_names = ["Sheet1"] diff --git a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py index 5beed88971..b4b08f57ec 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py @@ -56,7 +56,7 @@ def _patch_all_extractors(monkeypatch) -> _ExtractorFactory: class TestExtractProcessorLoaders: - def test_load_from_upload_file_return_docs_and_text(self, monkeypatch): + def test_load_from_upload_file_return_docs_and_text(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: SimpleNamespace(**kwargs)) monkeypatch.setattr( @@ -93,7 +93,9 @@ class TestExtractProcessorLoaders: ), ], ) - def test_load_from_url_builds_temp_file_with_correct_suffix(self, monkeypatch, url, headers, expected_suffix): + def test_load_from_url_builds_temp_file_with_correct_suffix( + self, monkeypatch: pytest.MonkeyPatch, url, headers, expected_suffix + ): response = SimpleNamespace(headers=headers, content=b"body") monkeypatch.setattr(processor_module.ssrf_proxy, "get", lambda *args, **kwargs: response) monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: SimpleNamespace(**kwargs)) @@ -119,11 +121,13 @@ class TestExtractProcessorLoaders: class TestExtractProcessorFileRouting: @pytest.fixture(autouse=True) - def _set_unstructured_config(self, monkeypatch): + def _set_unstructured_config(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_URL", "https://unstructured") monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_KEY", "key") - def _run_extract_for_extension(self, 
monkeypatch, extension: str, etl_type: str, is_automatic: bool = False): + def _run_extract_for_extension( + self, monkeypatch: pytest.MonkeyPatch, extension: str, etl_type: str, is_automatic: bool = False + ): factory = _patch_all_extractors(monkeypatch) monkeypatch.setattr(processor_module.dify_config, "ETL_TYPE", etl_type) @@ -167,7 +171,7 @@ class TestExtractProcessorFileRouting: ], ) def test_extract_routes_file_extensions_for_unstructured_mode( - self, monkeypatch, extension, expected_extractor, is_automatic + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor, is_automatic ): extractor_name, args, kwargs = self._run_extract_for_extension( monkeypatch, extension, etl_type="Unstructured", is_automatic=is_automatic @@ -189,7 +193,9 @@ class TestExtractProcessorFileRouting: (".txt", "TextExtractor"), ], ) - def test_extract_routes_file_extensions_for_default_mode(self, monkeypatch, extension, expected_extractor): + def test_extract_routes_file_extensions_for_default_mode( + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor + ): extractor_name, _, _ = self._run_extract_for_extension(monkeypatch, extension, etl_type="SelfHosted") assert extractor_name == expected_extractor @@ -202,7 +208,7 @@ class TestExtractProcessorFileRouting: class TestExtractProcessorDatasourceRouting: - def test_extract_routes_notion_datasource(self, monkeypatch): + def test_extract_routes_notion_datasource(self, monkeypatch: pytest.MonkeyPatch): factory = _patch_all_extractors(monkeypatch) notion_info = SimpleNamespace( @@ -228,7 +234,9 @@ class TestExtractProcessorDatasourceRouting: ("jinareader", "JinaReaderWebExtractor"), ], ) - def test_extract_routes_website_datasource_providers(self, monkeypatch, provider: str, expected: str): + def test_extract_routes_website_datasource_providers( + self, monkeypatch: pytest.MonkeyPatch, provider: str, expected: str + ): factory = _patch_all_extractors(monkeypatch) website_info = SimpleNamespace( diff --git 
a/api/tests/unit_tests/core/rag/extractor/test_helpers.py b/api/tests/unit_tests/core/rag/extractor/test_helpers.py index 74387f749d..1c6f97ec53 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_helpers.py +++ b/api/tests/unit_tests/core/rag/extractor/test_helpers.py @@ -21,7 +21,7 @@ class TestHelpers: # Assert the language field for full coverage assert encodings[0].language is not None - def test_detect_file_encodings_timeout(self, monkeypatch): + def test_detect_file_encodings_timeout(self, monkeypatch: pytest.MonkeyPatch): class FakeFuture: def result(self, timeout=None): raise helpers.concurrent.futures.TimeoutError() @@ -41,7 +41,7 @@ class TestHelpers: with pytest.raises(TimeoutError, match="Timeout reached while detecting encoding"): detect_file_encodings("file.txt", timeout=1) - def test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch): + def test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch: pytest.MonkeyPatch): class FakeResult: encoding = None coherence = 0.0 diff --git a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py index 7e78c86c7d..8ede44ec04 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py @@ -74,7 +74,7 @@ after assert "[link]" not in tups[1][1] assert "img.png" not in tups[1][1] - def test_parse_tups_autodetects_encoding_after_decode_error(self, monkeypatch): + def test_parse_tups_autodetects_encoding_after_decode_error(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=True) calls: list[str | None] = [] @@ -99,7 +99,7 @@ after assert len(tups) == 2 assert calls == [None, "bad-encoding", "utf-8"] - def test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch): + def 
test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=False) def raise_decode(self, encoding=None): @@ -110,7 +110,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch): + def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path") def raise_other(self, encoding=None): @@ -121,7 +121,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch): + def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path") monkeypatch.setattr(extractor, "parse_tups", lambda _: [(None, "plain"), ("Header", "value")]) diff --git a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py index 808e41867e..49f7b592dc 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py @@ -28,7 +28,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "token" - def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, monkeypatch): + def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -46,7 +46,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "env-token" - def test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch): + def 
test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -63,7 +63,7 @@ class TestNotionExtractorInitAndPublicMethods: credential_id="cred", ) - def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch): + def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -83,7 +83,7 @@ class TestNotionExtractorInitAndPublicMethods: load_mock.assert_called_once_with("obj", "page") assert len(docs) == 1 - def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch): + def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -394,7 +394,7 @@ class TestNotionMetadataAndCredentialMethods: assert extractor.update_last_edited_time(None) is None - def test_update_last_edited_time_updates_document_and_commits(self, monkeypatch): + def test_update_last_edited_time_updates_document_and_commits(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -479,7 +479,7 @@ class TestNotionMetadataAndCredentialMethods: with pytest.raises(AssertionError, match="Notion access token is required"): extractor.get_notion_last_edited_time() - def test_get_access_token_success_and_errors(self, monkeypatch): + def test_get_access_token_success_and_errors(self, monkeypatch: pytest.MonkeyPatch): with pytest.raises(Exception, match="No credential id found"): notion_extractor.NotionExtractor._get_access_token("tenant", None) diff --git a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py index 47222a23a2..f2caf02d5e 100644 --- 
a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py @@ -7,7 +7,7 @@ import core.rag.extractor.pdf_extractor as pe @pytest.fixture -def mock_dependencies(monkeypatch): +def mock_dependencies(monkeypatch: pytest.MonkeyPatch): # Mock storage saves = [] @@ -61,7 +61,9 @@ def mock_dependencies(monkeypatch): (b"\x89PNG\r\n\x1a\n some png", "image/png", "png", "test_file_id_png"), ], ) -def test_extract_images_formats(mock_dependencies, monkeypatch, image_bytes, expected_mime, expected_ext, file_id): +def test_extract_images_formats( + mock_dependencies, monkeypatch: pytest.MonkeyPatch, image_bytes, expected_mime, expected_ext, file_id +): saves = mock_dependencies.saves db_stub = mock_dependencies.db @@ -122,7 +124,7 @@ def test_extract_images_get_objects_scenarios(mock_dependencies, get_objects_sid assert result == "" -def test_extract_calls_extract_images(mock_dependencies, monkeypatch): +def test_extract_calls_extract_images(mock_dependencies, monkeypatch: pytest.MonkeyPatch): # Mock pypdfium2 mock_pdf_doc = MagicMock() mock_page = MagicMock() diff --git a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py index fb3c6e52c6..71046d73af 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py @@ -19,7 +19,7 @@ class TestTextExtractor: assert docs[0].page_content == "hello world" assert docs[0].metadata == {"source": str(file_path)} - def test_extract_autodetect_success_after_decode_error(self, monkeypatch): + def test_extract_autodetect_success_after_decode_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) calls = [] @@ -44,7 +44,7 @@ class TestTextExtractor: assert docs[0].page_content == "decoded text" assert calls == [None, "bad", "utf-8"] - def 
test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch): + def test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) def always_decode_error(self, encoding=None): @@ -56,7 +56,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="all detected encodings failed"): extractor.extract() - def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch): + def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=False) def always_decode_error(self, encoding=None): @@ -67,7 +67,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="specified encoding failed"): extractor.extract() - def test_extract_wraps_non_decode_exceptions(self, monkeypatch): + def test_extract_wraps_non_decode_exceptions(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt") def raise_other(self, encoding=None): diff --git a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py index b9f2449cfb..513d232d7f 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py @@ -61,7 +61,7 @@ def test_parse_row(): assert extractor._parse_row(row, {}, 3) == gt[idx] -def test_init_downloads_via_ssrf_proxy(monkeypatch): +def test_init_downloads_via_ssrf_proxy(monkeypatch: pytest.MonkeyPatch): doc = Document() doc.add_paragraph("hello") buf = io.BytesIO() @@ -97,7 +97,7 @@ def test_init_downloads_via_ssrf_proxy(monkeypatch): extractor.temp_file.close() -def test_extract_images_from_docx(monkeypatch): +def test_extract_images_from_docx(monkeypatch: pytest.MonkeyPatch): external_bytes = b"ext-bytes" internal_bytes = b"int-bytes" @@ -210,7 +210,7 @@ def 
test_extract_images_from_docx_uses_internal_files_url(): dify_config.INTERNAL_FILES_URL = original_internal_files_url -def test_extract_hyperlinks(monkeypatch): +def test_extract_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage to avoid issues during image extraction (even if no images are present) monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -255,7 +255,7 @@ def test_extract_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_extract_legacy_hyperlinks(monkeypatch): +def test_extract_legacy_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -317,7 +317,7 @@ def test_extract_legacy_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_init_rejects_invalid_url_status(monkeypatch): +def test_init_rejects_invalid_url_status(monkeypatch: pytest.MonkeyPatch): class FakeResponse: status_code = 404 content = b"" @@ -392,7 +392,7 @@ def test_close_closes_awaitable_close_result(): extractor.temp_file.close.assert_called_once() -def test_extract_images_handles_invalid_external_cases(monkeypatch): +def test_extract_images_handles_invalid_external_cases(monkeypatch: pytest.MonkeyPatch): class FakeTargetRef: def __contains__(self, item): return item == "image" @@ -437,7 +437,7 @@ def test_extract_images_handles_invalid_external_cases(monkeypatch): db_stub.session.commit.assert_called_once() -def test_table_to_markdown_and_parse_helpers(monkeypatch): +def test_table_to_markdown_and_parse_helpers(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) table = SimpleNamespace( @@ -500,7 +500,7 @@ def test_table_to_markdown_and_parse_helpers(monkeypatch): assert extractor._parse_cell(cell, image_map) == "EXT-IMGINT-IMGplain" -def 
test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch): +def test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) ext_image_id = "ext-image" diff --git a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py index 26ce333e11..19fb385a6d 100644 --- a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py +++ b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py @@ -45,7 +45,7 @@ def _install_chunk_by_title(monkeypatch: pytest.MonkeyPatch, chunks: list[Simple class TestUnstructuredMarkdownMsgXml: - def test_markdown_extractor_without_api(self, monkeypatch): + def test_markdown_extractor_without_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" chunk-1 "), SimpleNamespace(text=" chunk-2 ")]) _register_module( monkeypatch, "unstructured.partition.md", partition_md=lambda filename: [SimpleNamespace(text="x")] @@ -55,7 +55,7 @@ class TestUnstructuredMarkdownMsgXml: assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_markdown_extractor_with_api(self, monkeypatch): + def test_markdown_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" via-api ")]) calls = {} @@ -70,7 +70,7 @@ class TestUnstructuredMarkdownMsgXml: assert docs[0].page_content == "via-api" assert calls == {"filename": "/tmp/file.md", "api_url": "https://u", "api_key": "k"} - def test_msg_extractor_local(self, monkeypatch): + def test_msg_extractor_local(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) _register_module( monkeypatch, "unstructured.partition.msg", partition_msg=lambda filename: 
[SimpleNamespace(text="x")] @@ -78,7 +78,7 @@ class TestUnstructuredMarkdownMsgXml: assert UnstructuredMsgExtractor("/tmp/file.msg").extract()[0].page_content == "msg-doc" - def test_msg_extractor_with_api(self, monkeypatch): + def test_msg_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) calls = {} @@ -94,7 +94,7 @@ class TestUnstructuredMarkdownMsgXml: ) assert calls["filename"] == "/tmp/file.msg" - def test_xml_extractor_local_and_api(self, monkeypatch): + def test_xml_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="xml-doc")]) xml_calls = {} @@ -124,7 +124,7 @@ class TestUnstructuredMarkdownMsgXml: class TestUnstructuredEmailAndEpub: - def test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch): + def test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) captured = {} @@ -150,7 +150,7 @@ class TestUnstructuredEmailAndEpub: assert "Hello Email" in chunk_elements[0].text assert chunk_elements[1].text == bad_base64 - def test_email_extractor_with_api(self, monkeypatch): + def test_email_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="api-email")]) _register_module( monkeypatch, @@ -162,7 +162,7 @@ class TestUnstructuredEmailAndEpub: assert docs[0].page_content == "api-email" - def test_epub_extractor_local_and_api(self, monkeypatch): + def test_epub_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="epub-doc")]) calls = {"download": 0, "partition": 0} @@ -198,7 +198,7 @@ class TestUnstructuredPPTAndPPTX: with pytest.raises(NotImplementedError, match="Unstructured API Url is not configured"): 
UnstructuredPPTExtractor("/tmp/file.ppt").extract() - def test_ppt_extractor_groups_text_by_page(self, monkeypatch): + def test_ppt_extractor_groups_text_by_page(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -215,7 +215,7 @@ class TestUnstructuredPPTAndPPTX: assert [doc.page_content for doc in docs] == ["A\nB", "C"] - def test_pptx_extractor_local_and_api(self, monkeypatch): + def test_pptx_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -244,7 +244,7 @@ class TestUnstructuredPPTAndPPTX: class TestUnstructuredWord: - def _install_doc_modules(self, monkeypatch, version: str, filetype_value): + def _install_doc_modules(self, monkeypatch: pytest.MonkeyPatch, version: str, filetype_value): _register_unstructured_packages(monkeypatch) class FileType: @@ -276,13 +276,13 @@ class TestUnstructuredWord: ], ) - def test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch): + def test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="doc") with pytest.raises(ValueError, match="Partitioning .doc files is only supported"): UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() - def test_word_extractor_doc_and_docx_paths(self, monkeypatch): + def test_word_extractor_doc_and_docx_paths(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.11", filetype_value="doc") docs = UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() @@ -292,7 +292,7 @@ class TestUnstructuredWord: docs = UnstructuredWordExtractor("/tmp/file.docx", "https://u", "k").extract() assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch): + def 
test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="not-used") monkeypatch.setitem(sys.modules, "magic", None) diff --git a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py index d758be218a..95878fc688 100644 --- a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py +++ b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py @@ -59,7 +59,7 @@ class TestWaterCrawlExceptions: class TestBaseAPIClient: - def test_init_session_builds_expected_headers(self, monkeypatch): + def test_init_session_builds_expected_headers(self, monkeypatch: pytest.MonkeyPatch): captured = {} def fake_client(**kwargs): @@ -74,7 +74,7 @@ class TestBaseAPIClient: assert captured["headers"]["X-API-Key"] == "k" assert captured["headers"]["User-Agent"] == "WaterCrawl-Plugin" - def test_request_stream_and_non_stream_paths(self, monkeypatch): + def test_request_stream_and_non_stream_paths(self, monkeypatch: pytest.MonkeyPatch): class FakeSession: def __init__(self): self.request_calls = [] @@ -106,7 +106,7 @@ class TestBaseAPIClient: assert fake_session.build_calls assert fake_session.send_calls[0][1] is True - def test_http_method_helpers_delegate_to_request(self, monkeypatch): + def test_http_method_helpers_delegate_to_request(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(BaseAPIClient, "init_session", lambda self: MagicMock()) client = BaseAPIClient(api_key="k", base_url="https://watercrawl.dev") @@ -127,7 +127,7 @@ class TestBaseAPIClient: class TestWaterCrawlAPIClient: - def test_process_eventstream_and_download(self, monkeypatch): + def test_process_eventstream_and_download(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = MagicMock() @@ -174,7 +174,7 @@ class TestWaterCrawlAPIClient: 
client.process_response(_response(200, content_type="application/octet-stream", content=b"bin")) == b"bin" ) - def test_process_response_event_stream_returns_generator(self, monkeypatch): + def test_process_response_event_stream_returns_generator(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") generator = (item for item in [{"type": "result", "data": {}}]) monkeypatch.setattr(client, "process_eventstream", lambda response, download=False: generator) @@ -193,7 +193,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(RuntimeError, match="http error"): client.process_response(response) - def test_endpoint_wrappers(self, monkeypatch): + def test_endpoint_wrappers(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "process_response", lambda resp: "processed") @@ -208,7 +208,7 @@ class TestWaterCrawlAPIClient: assert client.download_crawl_request("id") == "processed" assert client.get_crawl_request_results("id") == "processed" - def test_monitor_crawl_request_generator_and_validation(self, monkeypatch): + def test_monitor_crawl_request_generator_and_validation(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "process_response", lambda _: (x for x in [{"type": "result", "data": 1}])) @@ -221,7 +221,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(ValueError, match="Generator expected"): list(client.monitor_crawl_request("job-1")) - def test_scrape_url_sync_and_async(self, monkeypatch): + def test_scrape_url_sync_and_async(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "create_crawl_request", lambda **kwargs: {"uuid": "job-1"}) @@ -238,7 +238,7 @@ class TestWaterCrawlAPIClient: sync_result = client.scrape_url("https://example.com", sync=True) assert sync_result == {"url": "https://example.com"} - def test_download_result_fetches_json_and_closes(self, 
monkeypatch): + def test_download_result_fetches_json_and_closes(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = _response(200, {"markdown": "body"}) @@ -251,7 +251,7 @@ class TestWaterCrawlAPIClient: class TestWaterCrawlProvider: - def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch): + def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") captured_kwargs = {} @@ -290,7 +290,7 @@ class TestWaterCrawlProvider: assert captured_kwargs["page_options"]["only_main_content"] is False assert captured_kwargs["page_options"]["wait_time"] == 1000 - def test_get_crawl_status_active_and_completed(self, monkeypatch): + def test_get_crawl_status_active_and_completed(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( @@ -327,7 +327,7 @@ class TestWaterCrawlProvider: assert completed["status"] == "completed" assert completed["data"] == [{"url": "u"}] - def test_get_crawl_url_data_and_scrape(self, monkeypatch): + def test_get_crawl_url_data_and_scrape(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr(provider, "scrape_url", lambda url: {"source_url": url}) @@ -339,7 +339,7 @@ class TestWaterCrawlProvider: monkeypatch.setattr(provider, "_get_results", lambda job_id, query_params=None: iter([])) assert provider.get_crawl_url_data("job", "u1") is None - def test_structure_data_validation_and_get_results_pagination(self, monkeypatch): + def test_structure_data_validation_and_get_results_pagination(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") with pytest.raises(ValueError, match="Invalid result object"): @@ -380,7 +380,7 @@ class TestWaterCrawlProvider: assert len(results) == 1 assert results[0]["source_url"] == "https://a" - def test_scrape_url_uses_client_and_structure(self, monkeypatch): + def 
test_scrape_url_uses_client_and_structure(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( provider.client, "scrape_url", lambda **kwargs: {"result": {"metadata": {}, "markdown": "m"}, "url": "u"} @@ -392,7 +392,7 @@ class TestWaterCrawlProvider: class TestWaterCrawlWebExtractor: - def test_extract_crawl_and_scrape_modes(self, monkeypatch): + def test_extract_crawl_and_scrape_modes(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: { @@ -418,7 +418,7 @@ class TestWaterCrawlWebExtractor: assert crawl_extractor.extract()[0].page_content == "crawl" assert scrape_extractor.extract()[0].page_content == "scrape" - def test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch): + def test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: None, diff --git a/api/tests/unit_tests/core/repositories/test_human_input_form_repository_impl.py b/api/tests/unit_tests/core/repositories/test_human_input_form_repository_impl.py index 18ae9fafc8..a2e10d924c 100644 --- a/api/tests/unit_tests/core/repositories/test_human_input_form_repository_impl.py +++ b/api/tests/unit_tests/core/repositories/test_human_input_form_repository_impl.py @@ -23,7 +23,7 @@ from core.workflow.human_input_adapter import ( ) from graphon.nodes.human_input.entities import ( FormDefinition, - UserAction, + UserActionConfig, ) from graphon.nodes.human_input.enums import HumanInputFormKind, HumanInputFormStatus from libs.datetime_utils import naive_utc_now @@ -272,7 +272,7 @@ def _make_form_definition() -> str: return FormDefinition( form_content="hello", inputs=[], - user_actions=[UserAction(id="submit", title="Submit")], + 
user_actions=[UserActionConfig(id="submit", title="Submit")], rendered_content="

hello

", expiration_time=naive_utc_now(), ).model_dump_json() diff --git a/api/tests/unit_tests/core/repositories/test_human_input_repository.py b/api/tests/unit_tests/core/repositories/test_human_input_repository.py index 4248782d93..edd8be8618 100644 --- a/api/tests/unit_tests/core/repositories/test_human_input_repository.py +++ b/api/tests/unit_tests/core/repositories/test_human_input_repository.py @@ -29,7 +29,7 @@ from core.workflow.human_input_adapter import ( MemberRecipient, WebAppDeliveryMethod, ) -from graphon.nodes.human_input.entities import HumanInputNodeData, UserAction +from graphon.nodes.human_input.entities import HumanInputNodeData, UserActionConfig from graphon.nodes.human_input.enums import HumanInputFormKind, HumanInputFormStatus from libs.datetime_utils import naive_utc_now from models.human_input import HumanInputFormRecipient, RecipientType @@ -467,7 +467,7 @@ def test_create_form_adds_console_and_backstage_recipients(monkeypatch: pytest.M delivery_methods=[], form_content="hello", inputs=[], - user_actions=[UserAction(id="submit", title="Submit")], + user_actions=[UserActionConfig(id="submit", title="Submit")], ) params = FormCreateParams( workflow_execution_id=None, @@ -586,6 +586,73 @@ def test_mark_submitted_updates_and_raises_when_missing(monkeypatch: pytest.Monk assert record.submitted_data == {"k": "v"} +def test_mark_submitted_serializes_select_and_file_payloads(monkeypatch: pytest.MonkeyPatch) -> None: + fixed_now = datetime(2024, 1, 1, 0, 0, 0) + monkeypatch.setattr("core.repositories.human_input_repository.naive_utc_now", lambda: fixed_now) + + form = _DummyForm( + id="f-complex", + workflow_run_id=None, + node_id="node", + tenant_id="tenant", + app_id="app", + form_definition=_make_form_definition_json(include_expiration_time=True), + rendered_content="

x

", + expiration_time=fixed_now, + ) + recipient = _DummyRecipient( + id="r-complex", + form_id=form.id, + recipient_type=RecipientType.CONSOLE, + access_token="tok", + ) + session = _FakeSession(forms={form.id: form}, recipients={recipient.id: recipient}) + _patch_session_factory(monkeypatch, session) + + payload = { + "decision": "approve", + "attachment": { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/file.txt", + "filename": "file.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + "attachments": [ + { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/first.txt", + "filename": "first.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/second.txt", + "filename": "second.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + ], + } + + repo = HumanInputFormSubmissionRepository() + record = repo.mark_submitted( + form_id=form.id, + recipient_id=recipient.id, + selected_action_id="approve", + form_data=payload, + submission_user_id="user-1", + submission_end_user_id="end-user-1", + ) + + assert json.loads(form.submitted_data or "") == payload + assert record.submitted_data == payload + + def test_mark_timeout_invalid_status_raises(monkeypatch: pytest.MonkeyPatch) -> None: form = _DummyForm( id="f", diff --git a/api/tests/unit_tests/core/telemetry/test_facade.py b/api/tests/unit_tests/core/telemetry/test_facade.py index 36e8e1bbb1..95d653f55b 100644 --- a/api/tests/unit_tests/core/telemetry/test_facade.py +++ b/api/tests/unit_tests/core/telemetry/test_facade.py @@ -14,7 +14,7 @@ from core.telemetry.events import TelemetryContext, TelemetryEvent @pytest.fixture -def telemetry_test_setup(monkeypatch): +def telemetry_test_setup(monkeypatch: pytest.MonkeyPatch): module_name = "core.ops.ops_trace_manager" ops_stub = types.ModuleType(module_name) diff 
--git a/api/tests/unit_tests/core/test_provider_manager.py b/api/tests/unit_tests/core/test_provider_manager.py index a5a542c94f..02f12fb3b4 100644 --- a/api/tests/unit_tests/core/test_provider_manager.py +++ b/api/tests/unit_tests/core/test_provider_manager.py @@ -570,8 +570,7 @@ def test_get_all_providers_normalizes_provider_names_with_model_provider_id() -> session.scalars.return_value = [openai_provider, gemini_provider] with ( - patch("core.provider_manager.db", SimpleNamespace(engine=object())), - patch("core.provider_manager.Session", return_value=_build_session_context(session)), + patch("core.provider_manager.session_factory.create_session", return_value=_build_session_context(session)), ): result = ProviderManager._get_all_providers("tenant-id") @@ -595,8 +594,7 @@ def test_provider_grouping_helpers_group_records_by_provider_name(method_name: s session.scalars.return_value = [openai_primary, openai_secondary, anthropic_record] with ( - patch("core.provider_manager.db", SimpleNamespace(engine=object())), - patch("core.provider_manager.Session", return_value=_build_session_context(session)), + patch("core.provider_manager.session_factory.create_session", return_value=_build_session_context(session)), ): result = getattr(ProviderManager, method_name)("tenant-id") @@ -611,8 +609,7 @@ def test_get_all_preferred_model_providers_returns_mapping_by_provider_name() -> session.scalars.return_value = [openai_preference, anthropic_preference] with ( - patch("core.provider_manager.db", SimpleNamespace(engine=object())), - patch("core.provider_manager.Session", return_value=_build_session_context(session)), + patch("core.provider_manager.session_factory.create_session", return_value=_build_session_context(session)), ): result = ProviderManager._get_all_preferred_model_providers("tenant-id") @@ -626,13 +623,13 @@ def test_get_all_provider_load_balancing_configs_returns_empty_when_cached_flag_ with ( patch("core.provider_manager.redis_client.get", return_value=b"False"), 
patch("core.provider_manager.FeatureService.get_features") as mock_get_features, - patch("core.provider_manager.Session") as mock_session_cls, + patch("core.provider_manager.session_factory.create_session") as mock_create_session, ): result = ProviderManager._get_all_provider_load_balancing_configs("tenant-id") assert result == {} mock_get_features.assert_not_called() - mock_session_cls.assert_not_called() + mock_create_session.assert_not_called() def test_get_all_provider_load_balancing_configs_populates_cache_and_groups_configs() -> None: @@ -642,14 +639,13 @@ def test_get_all_provider_load_balancing_configs_populates_cache_and_groups_conf session.scalars.return_value = [openai_config, anthropic_config] with ( - patch("core.provider_manager.db", SimpleNamespace(engine=object())), patch("core.provider_manager.redis_client.get", return_value=None), patch("core.provider_manager.redis_client.setex") as mock_setex, patch( "core.provider_manager.FeatureService.get_features", return_value=SimpleNamespace(model_load_balancing_enabled=True), ), - patch("core.provider_manager.Session", return_value=_build_session_context(session)), + patch("core.provider_manager.session_factory.create_session", return_value=_build_session_context(session)), ): result = ProviderManager._get_all_provider_load_balancing_configs("tenant-id") diff --git a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py index ad6d5906ae..b21a5c3e24 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py @@ -78,7 +78,7 @@ def _tool_yaml() -> dict[str, Any]: } -def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch): +def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch: pytest.MonkeyPatch): yaml_payloads = [_provider_yaml(), _tool_yaml()] def _load_yaml(*args, **kwargs): diff --git 
a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py index c7829fc0d7..3f6b1ec154 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py @@ -115,7 +115,7 @@ def test_weekday_tool(): list(weekday_tool.invoke(user_id="u", tool_parameters={"year": 2024, "day": 1})) -def test_simple_code_valid_execution(monkeypatch): +def test_simple_code_valid_execution(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -138,7 +138,7 @@ def test_simple_code_invalid_language(): list(simple_code.invoke(user_id="u", tool_parameters={"language": "go", "code": "fmt.Println(1)"})) -def test_simple_code_execution_error(monkeypatch): +def test_simple_code_execution_error(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -155,14 +155,14 @@ def test_webscraper_empty_url(): assert empty == "Please input url" -def test_webscraper_fetch(monkeypatch): +def test_webscraper_fetch(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: "page") full = list(webscraper.invoke(user_id="u", tool_parameters={"url": "https://example.com"}))[0].message.text assert full == "page" -def test_webscraper_summary(monkeypatch): +def test_webscraper_summary(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: "page") monkeypatch.setattr(webscraper, "summary", lambda user_id, content: "summary") @@ -175,7 +175,7 @@ def test_webscraper_summary(monkeypatch): assert summarized == "summary" -def test_webscraper_fetch_error(monkeypatch): +def test_webscraper_fetch_error(monkeypatch: 
pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr( "core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", @@ -192,7 +192,7 @@ def test_asr_invalid_file(): assert "not a valid audio file" in invalid_file -def test_asr_valid_file_invocation(monkeypatch): +def test_asr_valid_file_invocation(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) model_instance = type("M", (), {"invoke_speech2text": lambda self, file: "transcript"})() model_manager = type("Mgr", (), {"get_model_instance": lambda *a, **k: model_instance})() @@ -209,7 +209,7 @@ def test_asr_valid_file_invocation(monkeypatch): assert captured_manager_kwargs == {"tenant_id": "tenant-1", "user_id": "u"} -def test_asr_available_models_and_runtime_parameters(monkeypatch): +def test_asr_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) provider_model = type("PM", (), {"provider": "p", "models": [type("Model", (), {"model": "m"})()]})() monkeypatch.setattr( @@ -220,7 +220,7 @@ def test_asr_available_models_and_runtime_parameters(monkeypatch): assert asr.get_runtime_parameters()[0].name == "model" -def test_tts_invoke_returns_messages(monkeypatch): +def test_tts_invoke_returns_messages(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) captured_manager_kwargs = {} voices_model_instance = type( @@ -280,7 +280,7 @@ def test_tts_tool_raises_when_voice_unavailable(monkeypatch, voices): list(tts.invoke(user_id="u", tool_parameters={"model": "p#m", "text": "hello"})) -def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): +def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) model_1 = SimpleNamespace( @@ -307,7 +307,7 @@ def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): assert runtime_parameters[1].name == "voice#provider-a#model-a" -def 
test_provider_classes_and_builtin_sort(monkeypatch): +def test_provider_classes_and_builtin_sort(monkeypatch: pytest.MonkeyPatch): # Use object.__new__ to avoid YAML-loading __init__; only pass-through validation is exercised. # Ensure pass-through _validate_credentials methods are executed. AudioToolProvider._validate_credentials(object.__new__(AudioToolProvider), "u", {}) diff --git a/api/tests/unit_tests/core/tools/test_custom_tool.py b/api/tests/unit_tests/core/tools/test_custom_tool.py index f35546b025..f525baeaf2 100644 --- a/api/tests/unit_tests/core/tools/test_custom_tool.py +++ b/api/tests/unit_tests/core/tools/test_custom_tool.py @@ -47,7 +47,7 @@ def test_parsed_response_to_string(): assert ParsedResponse("ok", False).to_string() == "ok" -def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch): +def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch: pytest.MonkeyPatch): tool = _build_tool() forked = tool.fork_tool_runtime(ToolRuntime(tenant_id="tenant-2")) assert isinstance(forked, ApiTool) @@ -184,7 +184,7 @@ def test_get_parameter_value_and_type_conversion_helpers(): assert tool._convert_body_property_type({"anyOf": [{"type": "integer"}]}, "2") == 2 -def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch): +def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [ {"name": "id", "in": "path", "required": True, "schema": {"type": "string"}}, @@ -236,7 +236,7 @@ def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch invalid_method_tool.do_http_request("https://api.example.com", "TRACE", headers={}, parameters={}) -def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch): +def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [], "requestBody": { diff --git a/api/tests/unit_tests/core/tools/test_tool_manager.py 
b/api/tests/unit_tests/core/tools/test_tool_manager.py index 9ebaa0417b..7c7d6eec2d 100644 --- a/api/tests/unit_tests/core/tools/test_tool_manager.py +++ b/api/tests/unit_tests/core/tools/test_tool_manager.py @@ -648,7 +648,7 @@ def test_list_default_builtin_providers_for_postgres_and_mysql(): assert providers == provider_records -def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch): +def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch: pytest.MonkeyPatch): hardcoded_controller = SimpleNamespace(entity=SimpleNamespace(identity=SimpleNamespace(name="hardcoded"))) plugin_controller = object.__new__(PluginToolProviderController) plugin_controller.entity = SimpleNamespace(identity=SimpleNamespace(name="plugin-provider")) @@ -925,3 +925,78 @@ def test_convert_tool_parameters_type_constant_branch(): ) assert constant == {"text": "fixed"} + + +def test_convert_tool_parameters_type_model_selector_from_legacy_top_level_config(): + model_param = ToolParameter.get_simple_instance( + name="vision_llm_model", + llm_description="vision model", + typ=ToolParameter.ToolParameterType.MODEL_SELECTOR, + required=True, + ) + model_param.form = ToolParameter.ToolParameterForm.FORM + variable_pool = Mock() + + runtime_parameters = ToolManager._convert_tool_parameters_type( + parameters=[model_param], + variable_pool=variable_pool, + tool_configurations={ + "vision_llm_model": { + "type": "constant", + "value": "", + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + "mode": "chat", + } + }, + typ="workflow", + ) + + assert runtime_parameters == { + "vision_llm_model": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + "mode": "chat", + } + } + + +def test_convert_tool_parameters_type_model_selector_from_constant_value_config(): + model_param = ToolParameter.get_simple_instance( + name="tts_model", + llm_description="tts model", + 
typ=ToolParameter.ToolParameterType.MODEL_SELECTOR, + required=True, + ) + model_param.form = ToolParameter.ToolParameterForm.FORM + variable_pool = Mock() + + runtime_parameters = ToolManager._convert_tool_parameters_type( + parameters=[model_param], + variable_pool=variable_pool, + tool_configurations={ + "tts_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-tts-flash", + "model_type": "tts", + "language": "Chinese", + "voice": "Cherry", + }, + } + }, + typ="workflow", + ) + + assert runtime_parameters == { + "tts_model": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-tts-flash", + "model_type": "tts", + "language": "Chinese", + "voice": "Cherry", + } + } diff --git a/api/tests/unit_tests/core/tools/utils/test_configuration.py b/api/tests/unit_tests/core/tools/utils/test_configuration.py index ae5638784c..9e179536de 100644 --- a/api/tests/unit_tests/core/tools/utils/test_configuration.py +++ b/api/tests/unit_tests/core/tools/utils/test_configuration.py @@ -4,6 +4,8 @@ from collections.abc import Generator from typing import Any from unittest.mock import patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom from core.helper.tool_parameter_cache import ToolParameterCache from core.tools.__base.tool import Tool @@ -110,7 +112,7 @@ def test_encrypt_tool_parameters(): assert encrypted["plain"] == "x" -def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch): +def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( @@ -139,7 +141,7 @@ def test_delete_tool_parameters_cache(): mock_delete.assert_called_once() -def test_configuration_manager_decrypt_suppresses_errors(monkeypatch): +def test_configuration_manager_decrypt_suppresses_errors(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( patch.object(ToolParameterCache, "get", return_value=None), diff --git 
a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py index 5f34135af4..354b395504 100644 --- a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py +++ b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py @@ -42,7 +42,7 @@ class _FakeToolFileManager: @pytest.fixture(autouse=True) -def _patch_tool_file_manager(monkeypatch): +def _patch_tool_file_manager(monkeypatch: pytest.MonkeyPatch): # Patch the manager used inside the transformer module monkeypatch.setattr(mt, "ToolFileManager", _FakeToolFileManager) # also ensure predictable URL generation (no need to patch; uses id and extension only) diff --git a/api/tests/unit_tests/core/tools/utils/test_parser.py b/api/tests/unit_tests/core/tools/utils/test_parser.py index 032b1377a4..99a90f3b67 100644 --- a/api/tests/unit_tests/core/tools/utils/test_parser.py +++ b/api/tests/unit_tests/core/tools/utils/test_parser.py @@ -17,7 +17,7 @@ def app(): return app -def test_parse_openapi_to_tool_bundle_operation_id(app): +def test_parse_openapi_to_tool_bundle_operation_id(app: Flask): openapi = { "openapi": "3.0.0", "info": {"title": "Simple API", "version": "1.0.0"}, @@ -63,7 +63,7 @@ def test_parse_openapi_to_tool_bundle_operation_id(app): assert tool_bundles[2].operation_id == "createResource" -def test_parse_openapi_to_tool_bundle_properties_all_of(app): +def test_parse_openapi_to_tool_bundle_properties_all_of(app: Flask): openapi = { "openapi": "3.0.0", "info": {"title": "Simple API", "version": "1.0.0"}, @@ -118,7 +118,7 @@ def test_parse_openapi_to_tool_bundle_properties_all_of(app): # assert set(tool_bundles[0].parameters[0].options) == {"option1", "option2", "option3"} -def test_parse_openapi_to_tool_bundle_default_value_type_casting(app): +def test_parse_openapi_to_tool_bundle_default_value_type_casting(app: Flask): """ Test that default values are properly cast to match parameter types. 
This addresses the issue where array default values like [] cause validation errors diff --git a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py index 6bb86ebe78..081b189745 100644 --- a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py +++ b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py @@ -34,7 +34,7 @@ def test_system_encrypter_raises_error_for_invalid_ciphertext(): encrypter.decrypt_params("not-base64") -def test_system_helpers_use_global_cached_instance(monkeypatch): +def test_system_helpers_use_global_cached_instance(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(encryption, "_encrypter", None) monkeypatch.setattr("core.tools.utils.system_encryption.dify_config.SECRET_KEY", "global-secret") diff --git a/api/tests/unit_tests/core/variables/test_segment_type.py b/api/tests/unit_tests/core/variables/test_segment_type.py index d4e862220a..baa2ac2dc7 100644 --- a/api/tests/unit_tests/core/variables/test_segment_type.py +++ b/api/tests/unit_tests/core/variables/test_segment_type.py @@ -233,7 +233,7 @@ class TestSegmentTypeAdditionalMethods: assert SegmentType.GROUP.is_valid([StringSegment(value="b")]) is True assert SegmentType.GROUP.is_valid(["not-segment"]) is False - def test_unreachable_assertion_branch(self, monkeypatch): + def test_unreachable_assertion_branch(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(SegmentType, "is_array_type", lambda self: False) with pytest.raises(AssertionError, match="unreachable"): diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py b/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py index 75bc6d05f7..ad8c0b2a04 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py +++ 
b/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py @@ -12,6 +12,7 @@ from core.repositories.human_input_repository import ( from core.workflow.node_runtime import DifyHumanInputNodeRuntime from core.workflow.system_variables import build_system_variables from graphon.entities import WorkflowStartReason +from graphon.file import File, FileTransferMethod, FileType from graphon.graph import Graph from graphon.graph_engine import GraphEngine, GraphEngineConfig from graphon.graph_engine.command_channels import InMemoryChannel @@ -24,8 +25,15 @@ from graphon.graph_events import ( from graphon.nodes.base.entities import OutputVariableEntity from graphon.nodes.end.end_node import EndNode from graphon.nodes.end.entities import EndNodeData -from graphon.nodes.human_input.entities import HumanInputNodeData, UserAction -from graphon.nodes.human_input.enums import HumanInputFormStatus +from graphon.nodes.human_input.entities import ( + FileInputConfig, + FileListInputConfig, + HumanInputNodeData, + SelectInputConfig, + StringListSource, + UserActionConfig, +) +from graphon.nodes.human_input.enums import HumanInputFormStatus, ValueSourceType from graphon.nodes.human_input.human_input_node import HumanInputNode from graphon.nodes.start.entities import StartNodeData from graphon.nodes.start.start_node import StartNode @@ -106,6 +114,9 @@ class StaticRepo(HumanInputFormRepository): def get_form(self, node_id: str) -> HumanInputFormEntity | None: return self._forms_by_node_id.get(node_id) + def set_forms(self, forms_by_node_id: Mapping[str, HumanInputFormEntity]) -> None: + self._forms_by_node_id = dict(forms_by_node_id) + def create_form(self, params: FormCreateParams) -> HumanInputFormEntity: raise AssertionError("create_form should not be called in resume scenario") @@ -148,8 +159,15 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor human_data = HumanInputNodeData( title="Human Input", form_content="Human 
input required", - inputs=[], - user_actions=[UserAction(id="approve", title="Approve")], + inputs=[ + SelectInputConfig( + output_variable_name="decision", + option_source=StringListSource(type=ValueSourceType.CONSTANT, value=["approve", "reject"]), + ), + FileInputConfig(output_variable_name="attachment"), + FileListInputConfig(output_variable_name="attachments", number_limits=2), + ], + user_actions=[UserActionConfig(id="approve", title="Approve")], ) human_a_config = {"id": "human_a", "data": human_data.model_dump()} @@ -175,8 +193,12 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor end_data = EndNodeData( title="End", outputs=[ - OutputVariableEntity(variable="res_a", value_selector=["human_a", "__action_id"]), - OutputVariableEntity(variable="res_b", value_selector=["human_b", "__action_id"]), + OutputVariableEntity(variable="res_a_action", value_selector=["human_a", "__action_id"]), + OutputVariableEntity(variable="res_a_decision", value_selector=["human_a", "decision"]), + OutputVariableEntity(variable="res_a_attachment", value_selector=["human_a", "attachment"]), + OutputVariableEntity(variable="res_b_action", value_selector=["human_b", "__action_id"]), + OutputVariableEntity(variable="res_b_decision", value_selector=["human_b", "decision"]), + OutputVariableEntity(variable="res_b_attachments", value_selector=["human_b", "attachments"]), ], desc=None, ) @@ -214,13 +236,13 @@ def _run_graph(graph: Graph, runtime_state: GraphRuntimeState) -> list[object]: return list(engine.run()) -def _form(submitted: bool, action_id: str | None) -> StaticForm: +def _form(submitted: bool, action_id: str | None, data: Mapping[str, Any] | None = None) -> StaticForm: return StaticForm( form_id="form", rendered="rendered", is_submitted=submitted, action_id=action_id, - data={}, + data=data, status_value=HumanInputFormStatus.SUBMITTED if submitted else HumanInputFormStatus.WAITING, ) @@ -244,7 +266,21 @@ def 
test_parallel_human_input_join_completes_after_second_resume() -> None: first_resume_state = pause_store.load() first_resume_repo = StaticRepo( { - "human_a": _form(submitted=True, action_id="approve"), + "human_a": _form( + submitted=True, + action_id="approve", + data={ + "decision": "approve", + "attachment": { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/resume.pdf", + "filename": "resume.pdf", + "extension": ".pdf", + "mime_type": "application/pdf", + }, + }, + ), "human_b": _form(submitted=False, action_id=None), } ) @@ -254,19 +290,68 @@ def test_parallel_human_input_join_completes_after_second_resume() -> None: assert isinstance(first_resume_events[0], GraphRunStartedEvent) assert first_resume_events[0].reason is WorkflowStartReason.RESUMPTION assert isinstance(first_resume_events[-1], GraphRunPausedEvent) - pause_store.save(first_resume_state) - - second_resume_state = pause_store.load() - second_resume_repo = StaticRepo( + second_resume_state = first_resume_state + first_resume_repo.set_forms( { - "human_a": _form(submitted=True, action_id="approve"), - "human_b": _form(submitted=True, action_id="approve"), + "human_a": _form( + submitted=True, + action_id="approve", + data={ + "decision": "approve", + "attachment": { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/resume.pdf", + "filename": "resume.pdf", + "extension": ".pdf", + "mime_type": "application/pdf", + }, + }, + ), + "human_b": _form( + submitted=True, + action_id="approve", + data={ + "decision": "reject", + "attachments": [ + { + "type": "image", + "transfer_method": "remote_url", + "remote_url": "https://example.com/a.png", + "filename": "a.png", + "extension": ".png", + "mime_type": "image/png", + }, + { + "type": "image", + "transfer_method": "remote_url", + "remote_url": "https://example.com/b.png", + "filename": "b.png", + "extension": ".png", + "mime_type": "image/png", + }, + ], + }, + ), } ) 
- second_resume_graph = _build_graph(second_resume_state, second_resume_repo) - second_resume_events = _run_graph(second_resume_graph, second_resume_state) + second_resume_events = _run_graph(first_resume_graph, second_resume_state) assert isinstance(second_resume_events[0], GraphRunStartedEvent) assert second_resume_events[0].reason is WorkflowStartReason.RESUMPTION assert isinstance(second_resume_events[-1], GraphRunSucceededEvent) assert any(isinstance(event, NodeRunSucceededEvent) and event.node_id == "end" for event in second_resume_events) + second_resume_outputs = second_resume_state.outputs + assert second_resume_outputs["res_a_action"] == "approve" + assert second_resume_outputs["res_a_decision"] == "approve" + assert isinstance(second_resume_outputs["res_a_attachment"], File) + res_a_attachment_in_second_outputs = second_resume_outputs["res_a_attachment"] + assert isinstance(res_a_attachment_in_second_outputs, File) + assert res_a_attachment_in_second_outputs.filename == "resume.pdf" + assert res_a_attachment_in_second_outputs.type == FileType.DOCUMENT + assert res_a_attachment_in_second_outputs.transfer_method == FileTransferMethod.REMOTE_URL + assert second_resume_outputs["res_b_action"] == "approve" + assert second_resume_outputs["res_b_decision"] == "reject" + assert isinstance(second_resume_outputs["res_b_attachments"], list) + assert [file.filename for file in second_resume_outputs["res_b_attachments"]] == ["a.png", "b.png"] + assert all(file.type == FileType.IMAGE for file in second_resume_outputs["res_b_attachments"]) diff --git a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py index d7ef781732..a18a36a099 100644 --- a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from 
core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GraphParams: call_depth = 0 -def test_datasource_node_delegates_to_manager_stream(mocker): +def test_datasource_node_delegates_to_manager_stream(mocker: MockerFixture): # prepare sys variables sys_vars = { "sys": { diff --git a/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py b/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py index 0659984c76..fc7cdd64f9 100644 --- a/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py +++ b/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py @@ -36,20 +36,25 @@ from graphon.entities import GraphInitParams from graphon.node_events import PauseRequestedEvent from graphon.node_events.node import StreamCompletedEvent from graphon.nodes.human_input.entities import ( - FormInput, - FormInputDefault, + FileInputConfig, + FileListInputConfig, HumanInputNodeData, - UserAction, + ParagraphInputConfig, + SelectInputConfig, + StringListSource, + StringSource, + UserActionConfig, ) from graphon.nodes.human_input.enums import ( ButtonStyle, FormInputType, HumanInputFormStatus, - PlaceholderType, TimeoutUnit, + ValueSourceType, ) from graphon.nodes.human_input.human_input_node import HumanInputNode from graphon.runtime import GraphRuntimeState, VariablePool +from graphon.variables.segments import ArrayFileSegment, FileSegment, StringSegment from libs.datetime_utils import naive_utc_now @@ -195,27 +200,27 @@ class TestFormInput: def test_text_input_with_constant_default(self): """Test text input with constant default value.""" - default = FormInputDefault(type=PlaceholderType.CONSTANT, value="Enter your response here...") + default = StringSource(type=ValueSourceType.CONSTANT, value="Enter your response here...") - form_input = 
FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="user_input", default=default) + form_input = ParagraphInputConfig(output_variable_name="user_input", default=default) - assert form_input.type == FormInputType.TEXT_INPUT + assert form_input.type == FormInputType.PARAGRAPH assert form_input.output_variable_name == "user_input" - assert form_input.default.type == PlaceholderType.CONSTANT + assert form_input.default.type == ValueSourceType.CONSTANT assert form_input.default.value == "Enter your response here..." def test_text_input_with_variable_default(self): """Test text input with variable default value.""" - default = FormInputDefault(type=PlaceholderType.VARIABLE, selector=["node_123", "output_var"]) + default = StringSource(type=ValueSourceType.VARIABLE, selector=["node_123", "output_var"]) - form_input = FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="user_input", default=default) + form_input = ParagraphInputConfig(output_variable_name="user_input", default=default) - assert form_input.default.type == PlaceholderType.VARIABLE + assert form_input.default.type == ValueSourceType.VARIABLE assert form_input.default.selector == ["node_123", "output_var"] def test_form_input_without_default(self): """Test form input without default value.""" - form_input = FormInput(type=FormInputType.PARAGRAPH, output_variable_name="description") + form_input = ParagraphInputConfig(output_variable_name="description") assert form_input.type == FormInputType.PARAGRAPH assert form_input.output_variable_name == "description" @@ -227,7 +232,7 @@ class TestUserAction: def test_user_action_creation(self): """Test user action creation.""" - action = UserAction(id="approve", title="Approve", button_style=ButtonStyle.PRIMARY) + action = UserActionConfig(id="approve", title="Approve", button_style=ButtonStyle.PRIMARY) assert action.id == "approve" assert action.title == "Approve" @@ -235,13 +240,13 @@ class TestUserAction: def 
test_user_action_default_button_style(self): """Test user action with default button style.""" - action = UserAction(id="cancel", title="Cancel") + action = UserActionConfig(id="cancel", title="Cancel") assert action.button_style == ButtonStyle.DEFAULT def test_user_action_length_boundaries(self): """Test user action id and title length boundaries.""" - action = UserAction(id="a" * 20, title="b" * 20) + action = UserActionConfig(id="a" * 20, title="b" * 20) assert action.id == "a" * 20 assert action.title == "b" * 20 @@ -259,7 +264,7 @@ class TestUserAction: data[field_name] = value with pytest.raises(ValidationError) as exc_info: - UserAction.model_validate(data) + UserActionConfig.model_validate(data) errors = exc_info.value.errors() assert any(error["loc"] == (field_name,) and error["type"] == "string_too_long" for error in errors) @@ -273,14 +278,13 @@ class TestHumanInputNodeData: delivery_methods = [WebAppDeliveryMethod(enabled=True, config=_WebAppDeliveryConfig())] inputs = [ - FormInput( - type=FormInputType.TEXT_INPUT, + ParagraphInputConfig( output_variable_name="content", - default=FormInputDefault(type=PlaceholderType.CONSTANT, value="Enter content..."), + default=StringSource(type=ValueSourceType.CONSTANT, value="Enter content..."), ) ] - user_actions = [UserAction(id="submit", title="Submit", button_style=ButtonStyle.PRIMARY)] + user_actions = [UserActionConfig(id="submit", title="Submit", button_style=ButtonStyle.PRIMARY)] node_data = HumanInputNodeData( title="Human Input Test", @@ -338,8 +342,8 @@ class TestHumanInputNodeData: def test_duplicate_input_output_variable_name_raises_validation_error(self): """Duplicate form input output_variable_name should raise validation error.""" duplicate_inputs = [ - FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="content"), - FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="content"), + ParagraphInputConfig(output_variable_name="content"), + 
ParagraphInputConfig(output_variable_name="content"), ] with pytest.raises(ValidationError, match="duplicated output_variable_name 'content'"): @@ -348,8 +352,8 @@ class TestHumanInputNodeData: def test_duplicate_user_action_ids_raise_validation_error(self): """Duplicate user action ids should raise validation error.""" duplicate_actions = [ - UserAction(id="submit", title="Submit"), - UserAction(id="submit", title="Submit Again"), + UserActionConfig(id="submit", title="Submit"), + UserActionConfig(id="submit", title="Submit Again"), ] with pytest.raises(ValidationError, match="duplicated user action id 'submit'"): @@ -458,18 +462,16 @@ class TestHumanInputNodeVariableResolution: title="Human Input", form_content="Provide your name", inputs=[ - FormInput( - type=FormInputType.TEXT_INPUT, + ParagraphInputConfig( output_variable_name="user_name", - default=FormInputDefault(type=PlaceholderType.VARIABLE, selector=["start", "name"]), + default=StringSource(type=ValueSourceType.VARIABLE, selector=["start", "name"]), ), - FormInput( - type=FormInputType.TEXT_INPUT, + ParagraphInputConfig( output_variable_name="user_email", - default=FormInputDefault(type=PlaceholderType.CONSTANT, value="foo@example.com"), + default=StringSource(type=ValueSourceType.CONSTANT, value="foo@example.com"), ), ], - user_actions=[UserAction(id="submit", title="Submit")], + user_actions=[UserActionConfig(id="submit", title="Submit")], ) config = {"id": "human", "data": node_data.model_dump()} @@ -534,7 +536,7 @@ class TestHumanInputNodeVariableResolution: title="Human Input", form_content="Provide your name", inputs=[], - user_actions=[UserAction(id="submit", title="Submit")], + user_actions=[UserActionConfig(id="submit", title="Submit")], ) config = {"id": "human", "data": node_data.model_dump()} @@ -661,7 +663,7 @@ class TestHumanInputNodeVariableResolution: title="Human Input", form_content="Provide your name", inputs=[], - user_actions=[UserAction(id="submit", title="Submit")], + 
user_actions=[UserActionConfig(id="submit", title="Submit")], delivery_methods=[ EmailDeliveryMethod( enabled=True, @@ -721,15 +723,17 @@ class TestValidation: def test_invalid_form_input_type(self): """Test validation with invalid form input type.""" with pytest.raises(ValidationError): - FormInput( - type="invalid-type", # Invalid type - output_variable_name="test", + ParagraphInputConfig.model_validate( + { + "type": "invalid-type", + "output_variable_name": "test", + } ) def test_invalid_button_style(self): """Test validation with invalid button style.""" with pytest.raises(ValidationError): - UserAction( + UserActionConfig( id="test", title="Test", button_style="invalid-style", # Invalid style @@ -777,13 +781,8 @@ class TestHumanInputNodeRenderedContent: node_data = HumanInputNodeData( title="Human Input", form_content="Name: {{#$output.name#}}", - inputs=[ - FormInput( - type=FormInputType.TEXT_INPUT, - output_variable_name="name", - ) - ], - user_actions=[UserAction(id="approve", title="Approve")], + inputs=[ParagraphInputConfig(output_variable_name="name")], + user_actions=[UserActionConfig(id="approve", title="Approve")], ) config = {"id": "human", "data": node_data.model_dump()} @@ -810,4 +809,115 @@ class TestHumanInputNodeRenderedContent: last_event = events[-1] assert isinstance(last_event, StreamCompletedEvent) node_run_result = last_event.node_run_result - assert node_run_result.outputs["__rendered_content"] == "Name: Alice" + assert node_run_result.outputs["name"] == StringSegment(value="Alice") + assert node_run_result.outputs["__action_id"] == StringSegment(value="approve") + assert node_run_result.outputs["__rendered_content"] == StringSegment(value="Name: Alice") + + def test_resume_restores_file_outputs_as_runtime_segments(self): + variable_pool = VariablePool( + system_variables=build_system_variables( + user_id="user", + app_id="app", + workflow_id="workflow", + workflow_execution_id="run", + ), + user_inputs={}, + conversation_variables=[], 
+ ) + runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=0.0) + graph_init_params = GraphInitParams( + workflow_id="workflow", + graph_config={"nodes": [], "edges": []}, + run_context={ + DIFY_RUN_CONTEXT_KEY: { + "tenant_id": "tenant", + "app_id": "app", + "user_id": "user", + "user_from": "account", + "invoke_from": "debugger", + } + }, + call_depth=0, + ) + + node_data = HumanInputNodeData( + title="Human Input", + form_content=( + "Decision: {{#$output.decision#}}\n" + "Attachment: {{#$output.attachment#}}\n" + "Attachments: {{#$output.attachments#}}" + ), + inputs=[ + SelectInputConfig( + output_variable_name="decision", + option_source=StringListSource(type="constant", value=["approve", "reject"]), + ), + FileInputConfig(output_variable_name="attachment"), + FileListInputConfig(output_variable_name="attachments", number_limits=2), + ], + user_actions=[UserActionConfig(id="approve", title="Approve")], + ) + config = {"id": "human", "data": node_data.model_dump()} + + form_repository = InMemoryHumanInputFormRepository() + runtime = DifyHumanInputNodeRuntime(graph_init_params.run_context) + runtime._build_form_repository = MagicMock(return_value=form_repository) # type: ignore[attr-defined] + node = _build_human_input_node( + node_id=config["id"], + node_data=config["data"], + graph_init_params=graph_init_params, + graph_runtime_state=runtime_state, + runtime=runtime, + ) + + pause_gen = node._run() + pause_event = next(pause_gen) + assert isinstance(pause_event, PauseRequestedEvent) + with pytest.raises(StopIteration): + next(pause_gen) + + form_repository.set_submission( + action_id="approve", + form_data={ + "decision": "approve", + "attachment": { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/resume.pdf", + "filename": "resume.pdf", + "extension": ".pdf", + "mime_type": "application/pdf", + }, + "attachments": [ + { + "type": "image", + "transfer_method": "remote_url", + "remote_url": 
"https://example.com/a.png", + "filename": "a.png", + "extension": ".png", + "mime_type": "image/png", + }, + { + "type": "image", + "transfer_method": "remote_url", + "remote_url": "https://example.com/b.png", + "filename": "b.png", + "extension": ".png", + "mime_type": "image/png", + }, + ], + }, + ) + + events = list(node._run()) + last_event = events[-1] + assert isinstance(last_event, StreamCompletedEvent) + node_run_result = last_event.node_run_result + assert node_run_result.outputs["decision"] == StringSegment(value="approve") + assert node_run_result.outputs["__rendered_content"] == StringSegment( + value="Decision: approve\nAttachment: [file]\nAttachments: [2 files]" + ) + assert isinstance(node_run_result.outputs["attachment"], FileSegment) + assert node_run_result.outputs["attachment"].value.filename == "resume.pdf" + assert isinstance(node_run_result.outputs["attachments"], ArrayFileSegment) + assert [file.filename for file in node_run_result.outputs["attachments"].value] == ["a.png", "b.png"] diff --git a/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py b/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py index 4a9438b14f..71c1f113a2 100644 --- a/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py +++ b/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py @@ -6,15 +6,26 @@ from core.workflow.node_runtime import DifyHumanInputNodeRuntime from core.workflow.system_variables import default_system_variables from graphon.entities import GraphInitParams from graphon.enums import BuiltinNodeTypes +from graphon.file import FileTransferMethod, FileType from graphon.graph_events import ( NodeRunHumanInputFormFilledEvent, NodeRunHumanInputFormTimeoutEvent, NodeRunStartedEvent, ) -from graphon.nodes.human_input.entities import HumanInputNodeData +from graphon.nodes.human_input.entities import ( + 
FileInputConfig, + FileListInputConfig, + HumanInputNodeData, + ParagraphInputConfig, + SelectInputConfig, + StringListSource, + UserActionConfig, +) from graphon.nodes.human_input.enums import HumanInputFormStatus from graphon.nodes.human_input.human_input_node import HumanInputNode from graphon.runtime import GraphRuntimeState, VariablePool +from graphon.variables.segments import ArrayFileSegment, FileSegment, StringSegment +from graphon.variables.types import SegmentType from libs.datetime_utils import naive_utc_now @@ -48,7 +59,14 @@ def _create_human_input_node( ) -def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name#}}") -> HumanInputNode: +def _build_node( + form_content: str = ( + "Please enter your name:\n\n{{#$output.name#}}\n" + "Decision: {{#$output.decision#}}\n" + "Attachment: {{#$output.attachment#}}\n" + "Attachments: {{#$output.attachments#}}" + ), +) -> HumanInputNode: system_variables = default_system_variables() graph_runtime_state = GraphRuntimeState( variable_pool=VariablePool(system_variables=system_variables, user_inputs={}, environment_variables=[]), @@ -76,19 +94,15 @@ def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name# "title": "Human Input", "form_content": form_content, "inputs": [ - { - "type": "text_input", - "output_variable_name": "name", - "default": {"type": "constant", "value": ""}, - } - ], - "user_actions": [ - { - "id": "Accept", - "title": "Approve", - "button_style": "default", - } + ParagraphInputConfig(output_variable_name="name").model_dump(mode="json"), + SelectInputConfig( + output_variable_name="decision", + option_source=StringListSource(type="constant", value=["approve", "reject"]), + ).model_dump(mode="json"), + FileInputConfig(output_variable_name="attachment").model_dump(mode="json"), + FileListInputConfig(output_variable_name="attachments", number_limits=2).model_dump(mode="json"), ], + "user_actions": [UserActionConfig(id="Accept", 
title="Approve").model_dump(mode="json")], }, } @@ -97,7 +111,28 @@ def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name# rendered_content=form_content, submitted=True, selected_action_id="Accept", - submitted_data={"name": "Alice"}, + submitted_data={ + "name": "Alice", + "decision": "approve", + "attachment": { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/resume.pdf", + "filename": "resume.pdf", + "extension": ".pdf", + "mime_type": "application/pdf", + }, + "attachments": [ + { + "type": "image", + "transfer_method": "remote_url", + "remote_url": "https://example.com/a.png", + "filename": "a.png", + "extension": ".png", + "mime_type": "image/png", + } + ], + }, status=HumanInputFormStatus.SUBMITTED, expiration_time=naive_utc_now() + datetime.timedelta(days=1), ) @@ -138,20 +173,8 @@ def _build_timeout_node() -> HumanInputNode: "data": { "title": "Human Input", "form_content": "Please enter your name:\n\n{{#$output.name#}}", - "inputs": [ - { - "type": "text_input", - "output_variable_name": "name", - "default": {"type": "constant", "value": ""}, - } - ], - "user_actions": [ - { - "id": "Accept", - "title": "Approve", - "button_style": "default", - } - ], + "inputs": [ParagraphInputConfig(output_variable_name="name").model_dump(mode="json")], + "user_actions": [UserActionConfig(id="Accept", title="Approve").model_dump(mode="json")], }, } @@ -184,9 +207,25 @@ def test_human_input_node_emits_form_filled_event_before_succeeded(): filled_event = events[1] assert filled_event.node_title == "Human Input" - assert filled_event.rendered_content.endswith("Alice") + assert filled_event.rendered_content == ( + "Please enter your name:\n\nAlice\n" + "Decision: approve\n" + "Attachment: [file]\n" + "Attachments: [1 files]" + ) assert filled_event.action_id == "Accept" assert filled_event.action_text == "Approve" + assert filled_event.submitted_data["name"] == StringSegment(value="Alice") + assert 
filled_event.submitted_data["decision"] == StringSegment(value="approve") + assert isinstance(filled_event.submitted_data["attachment"], FileSegment) + assert filled_event.submitted_data["attachment"].value_type == SegmentType.FILE + assert filled_event.submitted_data["attachment"].value.filename == "resume.pdf" + assert filled_event.submitted_data["attachment"].value.type == FileType.DOCUMENT + assert filled_event.submitted_data["attachment"].value.transfer_method == FileTransferMethod.REMOTE_URL + assert isinstance(filled_event.submitted_data["attachments"], ArrayFileSegment) + assert filled_event.submitted_data["attachments"].value_type == SegmentType.ARRAY_FILE + assert filled_event.submitted_data["attachments"].value[0].filename == "a.png" + assert filled_event.submitted_data["attachments"].value[0].type == FileType.IMAGE def test_human_input_node_emits_timeout_event_before_succeeded(): diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py index f254fc3d09..89433b34e6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.rag.index_processor.constant.index_type import IndexTechniqueType @@ -50,7 +51,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_index_processor(mocker): +def mock_index_processor(mocker: MockerFixture): """Create mock IndexProcessorProtocol.""" mock_processor = Mock(spec=IndexProcessorProtocol) mocker.patch( @@ -61,7 +62,7 @@ def mock_index_processor(mocker): @pytest.fixture -def mock_summary_index_service(mocker): +def mock_summary_index_service(mocker: MockerFixture): 
"""Create mock SummaryIndexServiceProtocol.""" mock_service = Mock(spec=SummaryIndexServiceProtocol) mocker.patch( diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py index e923ee761b..d77a2ce363 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.workflow.nodes.knowledge_retrieval.entities import ( @@ -56,7 +57,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_rag_retrieval(mocker): +def mock_rag_retrieval(mocker: MockerFixture): """Create mock RAGRetrievalProtocol.""" mock_retrieval = Mock(spec=RAGRetrievalProtocol) mock_retrieval.knowledge_retrieval.return_value = [] diff --git a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py index c707cf28cd..c09f2d3fb6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py @@ -222,7 +222,7 @@ def llm_node( @pytest.fixture -def model_config(monkeypatch): +def model_config(monkeypatch: pytest.MonkeyPatch): from tests.integration_tests.model_runtime.__mock.plugin_model import MockModelClass def mock_model_providers(_self): @@ -1276,7 +1276,7 @@ class TestSaveMultimodalOutputAndConvertResultToMarkdown: mock_file_saver.save_binary_string.assert_not_called() mock_file_saver.save_remote_url.assert_not_called() - def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch): + def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch: 
pytest.MonkeyPatch): llm_node, mock_file_saver = llm_node_for_multimodal image_raw_data = b"PNG_DATA" diff --git a/api/tests/unit_tests/core/workflow/test_form_input_serialization_compat.py b/api/tests/unit_tests/core/workflow/test_form_input_serialization_compat.py new file mode 100644 index 0000000000..cc83a17dfc --- /dev/null +++ b/api/tests/unit_tests/core/workflow/test_form_input_serialization_compat.py @@ -0,0 +1,338 @@ +import json +from typing import Any + +from pydantic import TypeAdapter + +from core.app.entities.task_entities import HumanInputRequiredResponse +from core.entities.execution_extra_content import ( + HumanInputContent, + HumanInputFormDefinition, +) +from graphon.entities.pause_reason import HumanInputRequired +from graphon.nodes.human_input.entities import ( + FormDefinition, + FormInputConfig, + HumanInputNodeData, +) +from graphon.nodes.human_input.enums import ButtonStyle, TimeoutUnit, ValueSourceType + + +def _legacy_form_input_payloads() -> list[dict[str, Any]]: + return [ + { + "type": "paragraph", + "output_variable_name": "name", + "default": { + "type": "constant", + "selector": [], + "value": "Alice", + }, + }, + { + "type": "select", + "output_variable_name": "decision", + "option_source": { + "type": "constant", + "selector": [], + "value": ["approve", "reject"], + }, + }, + { + "type": "file", + "output_variable_name": "attachment", + "allowed_file_types": ["document"], + "allowed_file_extensions": [], + "allowed_file_upload_methods": ["remote_url"], + }, + { + "type": "file-list", + "output_variable_name": "attachments", + "allowed_file_types": ["document"], + "allowed_file_extensions": [], + "allowed_file_upload_methods": ["remote_url"], + "number_limits": 3, + }, + { + "type": "paragraph", + "output_variable_name": "summary", + "default": None, + }, + ] + + +def _legacy_user_action_payloads() -> list[dict[str, Any]]: + return [ + { + "id": "approve", + "title": "Approve", + "button_style": "primary", + }, + { + "id": 
"reject", + "title": "Reject", + "button_style": "default", + }, + ] + + +def _validate_legacy_json(model_class: type, payload: dict[str, Any]) -> Any: + adapter = TypeAdapter(model_class) + return adapter.validate_json(json.dumps(payload)) + + +def test_form_input_accepts_current_serialized_payload() -> None: + payload = { + "type": "paragraph", + "output_variable_name": "name", + "default": { + "type": "constant", + "selector": [], + "value": "Alice", + }, + } + + restored = _validate_legacy_json(FormInputConfig, payload) + assert restored.default is not None + assert restored.default.type == ValueSourceType.CONSTANT + + +def test_human_input_node_data_accepts_current_serialized_payload() -> None: + payload = { + "type": "human-input", + "title": "Human Input", + "form_content": "Hello {{#$output.name#}}", + "inputs": _legacy_form_input_payloads(), + "user_actions": _legacy_user_action_payloads(), + "timeout": 2, + "timeout_unit": "day", + } + + restored = _validate_legacy_json(HumanInputNodeData, payload) + assert restored.inputs[0].output_variable_name == "name" + assert restored.timeout_unit == TimeoutUnit.DAY + + +def test_form_definition_accepts_current_serialized_payload() -> None: + payload = { + "form_content": "Please confirm", + "inputs": _legacy_form_input_payloads(), + "user_actions": _legacy_user_action_payloads(), + "rendered_content": "Please confirm", + "expiration_time": "2024-01-01T00:00:00Z", + "default_values": {"name": "Alice"}, + "node_title": "Human Input", + "display_in_ui": True, + } + + restored = _validate_legacy_json(FormDefinition, payload) + assert restored.inputs[2].output_variable_name == "attachment" + assert restored.user_actions[0].id == "approve" + assert restored.user_actions[0].button_style == ButtonStyle.PRIMARY + + +def test_human_input_required_pause_reason_accepts_current_serialized_payload() -> None: + payload = { + "TYPE": "human_input_required", + "form_id": "form-1", + "form_content": "Please confirm", + "inputs": 
_legacy_form_input_payloads(), + "actions": _legacy_user_action_payloads(), + "node_id": "node-1", + "node_title": "Human Input", + "resolved_default_values": {"name": "Alice"}, + } + + restored = _validate_legacy_json(HumanInputRequired, payload) + assert restored.inputs[1].output_variable_name == "decision" + assert restored.actions[0].id == "approve" + assert restored.TYPE == "human_input_required" + + +def test_human_input_form_definition_accepts_current_serialized_payload() -> None: + payload = { + "form_id": "form-1", + "node_id": "node-1", + "node_title": "Human Input", + "form_content": "Please confirm", + "inputs": _legacy_form_input_payloads(), + "actions": _legacy_user_action_payloads(), + "display_in_ui": True, + "form_token": "token-1", + "resolved_default_values": {"name": "Alice"}, + "expiration_time": 1700000000, + } + + restored = _validate_legacy_json(HumanInputFormDefinition, payload) + assert restored.inputs[3].output_variable_name == "attachments" + assert restored.actions[0].id == "approve" + + +def test_human_input_content_accepts_current_serialized_payload() -> None: + payload = { + "workflow_run_id": "run-1", + "submitted": True, + "form_definition": { + "form_id": "form-1", + "node_id": "node-1", + "node_title": "Human Input", + "form_content": "Please confirm", + "inputs": _legacy_form_input_payloads(), + "actions": _legacy_user_action_payloads(), + "display_in_ui": True, + "form_token": "token-1", + "resolved_default_values": {"name": "Alice"}, + "expiration_time": 1700000000, + }, + "form_submission_data": { + "node_id": "node-1", + "node_title": "Human Input", + "rendered_content": "Please confirm", + "action_id": "approve", + "action_text": "Approve", + }, + "type": "human_input", + } + + restored = _validate_legacy_json(HumanInputContent, payload) + assert restored.form_definition is not None + assert restored.form_definition.inputs[0].output_variable_name == "name" + + +def 
test_human_input_content_accepts_current_serialized_payload_with_form_data() -> None: + payload = { + "workflow_run_id": "run-1", + "submitted": True, + "form_definition": { + "form_id": "form-1", + "node_id": "node-1", + "node_title": "Human Input", + "form_content": "Please confirm", + "inputs": [ + { + "type": "select", + "output_variable_name": "decision", + "option_source": {"type": "constant", "selector": [], "value": ["approve", "reject"]}, + }, + { + "type": "file", + "output_variable_name": "attachment", + "allowed_file_types": ["document"], + "allowed_file_extensions": [], + "allowed_file_upload_methods": ["remote_url"], + }, + { + "type": "file-list", + "output_variable_name": "attachments", + "allowed_file_types": ["document"], + "allowed_file_extensions": [], + "allowed_file_upload_methods": ["remote_url"], + "number_limits": 3, + }, + ], + "actions": _legacy_user_action_payloads(), + "display_in_ui": True, + "form_token": "token-1", + "resolved_default_values": {"decision": "approve"}, + "expiration_time": 1700000000, + }, + "form_submission_data": { + "node_id": "node-1", + "node_title": "Human Input", + "rendered_content": "Please confirm", + "action_id": "approve", + "action_text": "Approve", + "submitted_data": { + "decision": "approve", + "attachment": { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/file.txt", + "filename": "file.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + "attachments": [ + { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/first.txt", + "filename": "first.txt", + "extension": ".txt", + "mime_type": "text/plain", + } + ], + }, + }, + "type": "human_input", + } + + restored = HumanInputContent.model_validate_json(json.dumps(payload)) + assert restored.form_submission_data is not None + assert restored.form_submission_data.submitted_data == payload["form_submission_data"]["submitted_data"] + + +def 
test_human_input_content_accepts_legacy_serialized_payload_with_form_data() -> None: + payload = { + "workflow_run_id": "run-1", + "submitted": True, + "form_definition": { + "form_id": "form-1", + "node_id": "node-1", + "node_title": "Human Input", + "form_content": "Please confirm", + "inputs": _legacy_form_input_payloads(), + "actions": _legacy_user_action_payloads(), + "display_in_ui": True, + "form_token": "token-1", + "resolved_default_values": {"decision": "approve"}, + "expiration_time": 1700000000, + }, + "form_submission_data": { + "node_id": "node-1", + "node_title": "Human Input", + "rendered_content": "Please confirm", + "action_id": "approve", + "action_text": "Approve", + "form_data": { + "decision": "approve", + "attachment": { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/file.txt", + "filename": "file.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + }, + }, + "type": "human_input", + } + + restored = HumanInputContent.model_validate_json(json.dumps(payload)) + assert restored.form_submission_data is not None + assert restored.form_submission_data.submitted_data is None + + +def test_human_input_required_response_accepts_current_serialized_payload() -> None: + payload = { + "event": "human_input_required", + "task_id": "task-1", + "workflow_run_id": "run-1", + "data": { + "form_id": "form-1", + "node_id": "node-1", + "node_title": "Human Input", + "form_content": "Please confirm", + "inputs": _legacy_form_input_payloads(), + "actions": _legacy_user_action_payloads(), + "display_in_ui": True, + "form_token": "token-1", + "resolved_default_values": {"name": "Alice"}, + "expiration_time": 1700000000, + }, + } + + restored = _validate_legacy_json(HumanInputRequiredResponse, payload) + assert restored.data.inputs[1].output_variable_name == "decision" + assert restored.data.actions[0].id == "approve" + assert restored.event == "human_input_required" diff --git 
a/api/tests/unit_tests/core/workflow/test_human_input_adapter.py b/api/tests/unit_tests/core/workflow/test_human_input_adapter.py index 8b5fceeb37..51049f8792 100644 --- a/api/tests/unit_tests/core/workflow/test_human_input_adapter.py +++ b/api/tests/unit_tests/core/workflow/test_human_input_adapter.py @@ -166,6 +166,71 @@ def test_adapt_node_data_for_graph_migrates_legacy_tool_configurations() -> None } +def test_adapt_node_data_for_graph_preserves_model_selector_top_level_configurations() -> None: + normalized = adapt_node_data_for_graph( + { + "type": BuiltinNodeTypes.TOOL, + "tool_configurations": { + "vision_llm_model": { + "type": "constant", + "value": "", + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + "mode": "chat", + }, + }, + } + ) + + assert normalized["tool_configurations"] == {} + assert normalized["tool_parameters"] == { + "vision_llm_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + "mode": "chat", + }, + } + } + + +def test_adapt_node_data_for_graph_flattens_constant_model_selector_value() -> None: + normalized = adapt_node_data_for_graph( + { + "type": BuiltinNodeTypes.TOOL, + "tool_configurations": { + "tts_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-tts-flash", + "model_type": "tts", + "language": "Chinese", + "voice": "Cherry", + }, + }, + }, + } + ) + + assert normalized["tool_configurations"] == {} + assert normalized["tool_parameters"] == { + "tts_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-tts-flash", + "model_type": "tts", + "language": "Chinese", + "voice": "Cherry", + }, + } + } + + def test_adapt_node_config_for_graph_rewrites_nested_node_data() -> None: normalized = adapt_node_config_for_graph( { diff --git a/api/tests/unit_tests/core/workflow/test_node_factory.py 
b/api/tests/unit_tests/core/workflow/test_node_factory.py index 1821f72e0c..e93a7c7ccd 100644 --- a/api/tests/unit_tests/core/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/workflow/test_node_factory.py @@ -88,7 +88,7 @@ class TestFetchMemory: assert result is None - def test_returns_none_when_conversation_does_not_exist(self, monkeypatch): + def test_returns_none_when_conversation_does_not_exist(self, monkeypatch: pytest.MonkeyPatch): class FakeSelect: def where(self, *_args): return self @@ -119,7 +119,7 @@ class TestFetchMemory: assert result is None - def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch): + def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch: pytest.MonkeyPatch): conversation = sentinel.conversation memory = sentinel.memory @@ -189,7 +189,7 @@ class TestDifyGraphInitContext: class TestDefaultWorkflowCodeExecutor: - def test_execute_delegates_to_code_executor(self, monkeypatch): + def test_execute_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): executor = node_factory.DefaultWorkflowCodeExecutor() execute_workflow_code_template = MagicMock(return_value={"answer": "ok"}) monkeypatch.setattr( @@ -219,7 +219,7 @@ class TestDefaultWorkflowCodeExecutor: class TestCodeExecutorJinja2TemplateRenderer: - def test_render_template_delegates_to_code_executor(self, monkeypatch): + def test_render_template_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): renderer = workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() execute_workflow_code_template = MagicMock(return_value={"result": "Hello workflow"}) monkeypatch.setattr( @@ -237,7 +237,7 @@ class TestCodeExecutorJinja2TemplateRenderer: inputs={"name": "workflow"}, ) - def test_render_template_wraps_code_execution_errors(self, monkeypatch): + def test_render_template_wraps_code_execution_errors(self, monkeypatch: pytest.MonkeyPatch): renderer = 
workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() monkeypatch.setattr( workflow_template_rendering.CodeExecutor, @@ -434,7 +434,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: missing"): factory.create_node({"id": "node-id", "data": {"type": "missing"}}) - def test_rejects_missing_class_mapping(self, monkeypatch, factory): + def test_rejects_missing_class_mapping(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ -444,7 +444,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_rejects_missing_latest_class(self, monkeypatch, factory): + def test_rejects_missing_latest_class(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ -454,7 +454,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No latest version class found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_uses_version_specific_class_when_available(self, monkeypatch, factory): + def test_uses_version_specific_class_when_available(self, monkeypatch: pytest.MonkeyPatch, factory): matched_node = sentinel.matched_node latest_node_class = _node_constructor(return_value=sentinel.latest_node) matched_node_class = _node_constructor(return_value=matched_node) @@ -475,7 +475,9 @@ class TestDifyNodeFactoryCreateNode: assert kwargs["graph_runtime_state"] is factory.graph_runtime_state latest_node_class.assert_not_called() - def test_falls_back_to_latest_class_when_version_specific_mapping_is_missing(self, monkeypatch, factory): + def test_falls_back_to_latest_class_when_version_specific_mapping_is_missing( + self, monkeypatch: pytest.MonkeyPatch, factory + ): latest_node = 
sentinel.latest_node latest_node_class = _node_constructor(return_value=latest_node) monkeypatch.setattr( @@ -507,7 +509,7 @@ class TestDifyNodeFactoryCreateNode: (BuiltinNodeTypes.DOCUMENT_EXTRACTOR, "DocumentExtractorNode"), ], ) - def test_creates_specialized_nodes(self, monkeypatch, factory, node_type, constructor_name): + def test_creates_specialized_nodes(self, monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name): created_node = object() constructor = _node_constructor(return_value=created_node) constructor._mock_name = constructor_name @@ -597,7 +599,9 @@ class TestDifyNodeFactoryCreateNode: prepared_llm.assert_called_once_with(sentinel.model_instance) assert kwargs["model_instance"] is wrapped_model_instance - def test_create_node_passes_alias_preserving_llm_config_to_constructor(self, monkeypatch, factory): + def test_create_node_passes_alias_preserving_llm_config_to_constructor( + self, monkeypatch: pytest.MonkeyPatch, factory + ): created_node = object() constructor = _node_constructor(return_value=created_node) monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=constructor)) @@ -665,7 +669,7 @@ class TestDifyNodeFactoryCreateNode: ) def test_creates_model_backed_nodes( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name, @@ -726,7 +730,7 @@ class TestDifyNodeFactoryModelInstance: factory._llm_model_factory = sentinel.model_factory return factory - def test_delegates_to_fetch_model_config(self, monkeypatch, factory): + def test_delegates_to_fetch_model_config(self, monkeypatch: pytest.MonkeyPatch, factory): node_data_model = SimpleNamespace( provider="provider", name="model", @@ -755,7 +759,7 @@ class TestDifyNodeFactoryModelInstance: model_factory=sentinel.model_factory, ) - def test_propagates_fetch_model_config_errors(self, monkeypatch, factory): + def test_propagates_fetch_model_config_errors(self, monkeypatch: pytest.MonkeyPatch, factory): fetch_model_config = 
MagicMock(side_effect=ValueError("broken model config")) monkeypatch.setattr(node_factory, "fetch_model_config", fetch_model_config) @@ -780,7 +784,7 @@ class TestDifyNodeFactoryMemory: assert result is None factory.graph_runtime_state.variable_pool.get.assert_not_called() - def test_uses_string_segment_conversation_id(self, monkeypatch, factory): + def test_uses_string_segment_conversation_id(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = StringSegment(value="conversation-id") fetch_memory = MagicMock(return_value=sentinel.memory) @@ -800,7 +804,7 @@ class TestDifyNodeFactoryMemory: model_instance=sentinel.model_instance, ) - def test_ignores_non_string_segment_conversation_ids(self, monkeypatch, factory): + def test_ignores_non_string_segment_conversation_ids(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = sentinel.segment fetch_memory = MagicMock(return_value=sentinel.memory) diff --git a/api/tests/unit_tests/core/workflow/test_node_runtime.py b/api/tests/unit_tests/core/workflow/test_node_runtime.py index 5a43369a1a..874873d57e 100644 --- a/api/tests/unit_tests/core/workflow/test_node_runtime.py +++ b/api/tests/unit_tests/core/workflow/test_node_runtime.py @@ -22,14 +22,17 @@ from core.workflow.node_runtime import ( DifyPromptMessageSerializer, DifyRetrieverAttachmentLoader, DifyToolFileManager, + DifyToolNodeRuntime, apply_dify_debug_email_recipient, build_dify_llm_file_saver, resolve_dify_run_context, ) -from graphon.file import FileTransferMethod, FileType +from graphon.file import File, FileTransferMethod, FileType from graphon.model_runtime.entities.common_entities import I18nObject from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType -from graphon.nodes.human_input.entities import HumanInputNodeData +from 
graphon.nodes.human_input.entities import FileInputConfig, FileListInputConfig, HumanInputNodeData +from graphon.nodes.tool.entities import ToolNodeData, ToolProviderType +from graphon.variables.segments import ArrayFileSegment, FileSegment from tests.workflow_test_utils import build_test_run_context @@ -334,6 +337,41 @@ def test_dify_human_input_runtime_builds_debug_repository(monkeypatch: pytest.Mo ) +def test_dify_tool_runtime_spec_prefers_tool_parameters_for_runtime_form_values() -> None: + node_data = ToolNodeData( + provider_id="video-mixcut-agent", + provider_type=ToolProviderType.PLUGIN, + provider_name="sawyer-shi/video-mixcut-agent", + tool_name="mixcut", + tool_label="MixCut", + tool_configurations={"count": 2}, + tool_parameters={ + "vision_llm_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + }, + } + }, + ) + + spec = DifyToolNodeRuntime._build_tool_runtime_spec(node_data) + + assert spec.tool_configurations == { + "count": 2, + "vision_llm_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + }, + }, + } + + def test_dify_human_input_runtime_create_form_filters_debugger_delivery_methods() -> None: repository = MagicMock() repository.create_form.return_value = sentinel.form @@ -399,6 +437,70 @@ def test_dify_human_input_runtime_preserves_webapp_delivery_for_web_invocations( assert params.delivery_methods[1].config.recipients.include_bound_group is True +def test_dify_human_input_runtime_restore_submitted_data_rehydrates_files() -> None: + runtime = DifyHumanInputNodeRuntime(_build_run_context()) + file_value = File( + file_id="file-1", + file_type=FileType.DOCUMENT, + transfer_method=FileTransferMethod.LOCAL_FILE, + related_id="upload-1", + filename="resume.pdf", + extension=".pdf", + mime_type="application/pdf", + size=128, + ) + file_list_value = [ + File( + file_id="file-2", 
+ file_type=FileType.DOCUMENT, + transfer_method=FileTransferMethod.LOCAL_FILE, + related_id="upload-2", + filename="first.pdf", + extension=".pdf", + mime_type="application/pdf", + size=64, + ), + File( + file_id="file-3", + file_type=FileType.DOCUMENT, + transfer_method=FileTransferMethod.REMOTE_URL, + remote_url="https://example.com/second.pdf", + filename="second.pdf", + extension=".pdf", + mime_type="application/pdf", + size=96, + ), + ] + runtime._file_reference_factory.build_from_mapping = MagicMock(side_effect=[file_value, *file_list_value]) # type: ignore[method-assign] + node_data = HumanInputNodeData( + title="Human Input", + inputs=[ + FileInputConfig(output_variable_name="attachment"), + FileListInputConfig(output_variable_name="attachments", number_limits=2), + ], + ) + + restored = runtime.restore_submitted_data( + node_data=node_data, + submitted_data={ + "attachment": {"upload_file_id": "upload-1", "type": "document", "transfer_method": "local_file"}, + "attachments": [ + {"upload_file_id": "upload-2", "type": "document", "transfer_method": "local_file"}, + { + "url": "https://example.com/second.pdf", + "type": "document", + "transfer_method": "remote_url", + }, + ], + }, + ) + + assert restored["attachment"] is file_value + assert restored["attachments"] == file_list_value + assert isinstance(FileSegment(value=restored["attachment"]), FileSegment) + assert isinstance(ArrayFileSegment(value=restored["attachments"]), ArrayFileSegment) + + def test_build_dify_llm_file_saver_wires_runtime_adapters(monkeypatch: pytest.MonkeyPatch) -> None: file_saver_cls = MagicMock(return_value=sentinel.file_saver) monkeypatch.setattr("graphon.nodes.llm.file_saver.FileSaverImpl", file_saver_cls) diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry.py b/api/tests/unit_tests/core/workflow/test_workflow_entry.py index 041c5cc612..2e9e3468fd 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry.py +++ 
b/api/tests/unit_tests/core/workflow/test_workflow_entry.py @@ -19,7 +19,7 @@ from graphon.variables.variables import StringVariable @pytest.fixture(autouse=True) -def _mock_ssrf_head(monkeypatch): +def _mock_ssrf_head(monkeypatch: pytest.MonkeyPatch): """Avoid any real network requests during tests. factories.file_factory.remote.get_remote_file_info() uses ssrf_proxy.head diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py index 270d0bf90d..3978cbb1a0 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py @@ -603,7 +603,7 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_rejects_missing_node_class(self, monkeypatch): + def test_run_free_node_rejects_missing_node_class(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( workflow_entry, "resolve_workflow_node_class", @@ -619,7 +619,9 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented(self, monkeypatch): + def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented( + self, monkeypatch: pytest.MonkeyPatch + ): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): @@ -707,7 +709,7 @@ class TestWorkflowEntryHelpers: tenant_id="tenant-id", ) - def test_run_free_node_wraps_execution_failures(self, monkeypatch): + def test_run_free_node_wraps_execution_failures(self, monkeypatch: pytest.MonkeyPatch): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): diff --git a/api/tests/unit_tests/extensions/test_ext_request_logging.py b/api/tests/unit_tests/extensions/test_ext_request_logging.py index dcb457c806..03479686bb 100644 --- a/api/tests/unit_tests/extensions/test_ext_request_logging.py +++ 
b/api/tests/unit_tests/extensions/test_ext_request_logging.py @@ -71,7 +71,7 @@ def enable_request_logging(monkeypatch: pytest.MonkeyPatch): class TestRequestLoggingExtension: def test_receiver_should_not_be_invoked_if_configuration_is_disabled( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_request_receiver, mock_response_receiver, ): @@ -266,7 +266,9 @@ class TestResponseUnmodified: class TestRequestFinishedInfoAccessLine: - def test_info_access_log_includes_method_path_status_duration_trace_id(self, monkeypatch, caplog): + def test_info_access_log_includes_method_path_status_duration_trace_id( + self, monkeypatch: pytest.MonkeyPatch, caplog + ): """Ensure INFO access line contains expected fields with computed duration and trace id.""" app = _get_test_app() # Push a real request context so flask.request and g are available @@ -299,7 +301,7 @@ class TestRequestFinishedInfoAccessLine: assert "123.456" in msg # rounded to 3 decimals assert "trace-xyz" in msg - def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch, caplog): + def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch: pytest.MonkeyPatch, caplog): app = _get_test_app() with app.test_request_context("/bar", method="POST"): # No g.__request_started_ts set -> duration should be '-' diff --git a/api/tests/unit_tests/extensions/test_pubsub_channel.py b/api/tests/unit_tests/extensions/test_pubsub_channel.py index 926c406ad4..24bbf55cb3 100644 --- a/api/tests/unit_tests/extensions/test_pubsub_channel.py +++ b/api/tests/unit_tests/extensions/test_pubsub_channel.py @@ -1,10 +1,12 @@ +import pytest + from configs import dify_config from extensions import ext_redis from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel from libs.broadcast_channel.redis.sharded_channel import ShardedRedisBroadcastChannel -def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): +def 
test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) @@ -13,7 +15,7 @@ def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): assert isinstance(channel, RedisBroadcastChannel) -def test_get_pubsub_broadcast_channel_sharded(monkeypatch): +def test_get_pubsub_broadcast_channel_sharded(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) diff --git a/api/tests/unit_tests/factories/test_file_factory.py b/api/tests/unit_tests/factories/test_file_factory.py index c2835c4124..293be925ae 100644 --- a/api/tests/unit_tests/factories/test_file_factory.py +++ b/api/tests/unit_tests/factories/test_file_factory.py @@ -1,8 +1,11 @@ import re +from unittest.mock import MagicMock import pytest +from factories.file_factory import builders from factories.file_factory.remote import extract_filename, get_remote_file_info +from graphon.file import FileTransferMethod class _FakeResponse: @@ -291,3 +294,92 @@ class TestExtractFilename: """Test bare path (not full URL) with query string.""" result = extract_filename("/path/to/file.txt?extra=params", None) assert result == "file.txt" + + +class TestBuildFromDatasourceFile: + """Tests for _build_from_datasource_file extension handling.""" + + @staticmethod + def _patch_session(monkeypatch: pytest.MonkeyPatch, datasource_file): + """Stub session_factory.create_session() so it returns the given UploadFile-shaped record.""" + session = MagicMock() + session.scalar.return_value = datasource_file + ctx = MagicMock() + ctx.__enter__ = MagicMock(return_value=session) + ctx.__exit__ = MagicMock(return_value=False) + monkeypatch.setattr(builders.session_factory, "create_session", lambda: ctx) + + def _make_datasource_file(self, *, key: str, 
mime_type: str = "text/csv"): + f = MagicMock() + f.id = "file-id" + f.key = key + f.name = key.split("/")[-1] + f.mime_type = mime_type + f.size = 123 + f.source_url = f"https://example.com/{key}" + return f + + def test_extension_passed_without_doubled_dot(self, monkeypatch: pytest.MonkeyPatch): + """Regression: standardize_file_type must receive the extension exactly once-prefixed. + + Previously the call was ``standardize_file_type(extension="." + extension, ...)`` while + ``extension`` already had a leading dot, producing ``"..csv"``. The mitigating + ``lstrip(".")`` inside ``standardize_file_type`` masked the bug from end users, but the + argument shape itself was wrong and showed up in any caller that didn't strip dots. + """ + captured: dict = {} + + def fake_standardize(*, extension: str = "", mime_type: str = ""): + from graphon.file import FileType + + captured["extension"] = extension + captured["mime_type"] = mime_type + return FileType.DOCUMENT + + monkeypatch.setattr(builders, "standardize_file_type", fake_standardize) + + datasource_file = self._make_datasource_file(key="folder/data.csv", mime_type="text/csv") + self._patch_session(monkeypatch, datasource_file) + + access_controller = MagicMock() + access_controller.apply_upload_file_filters = lambda stmt: stmt + + file = builders._build_from_datasource_file( + mapping={"datasource_file_id": "file-id", "transfer_method": "datasource_file"}, + tenant_id="tenant-id", + transfer_method=FileTransferMethod.DATASOURCE_FILE, + access_controller=access_controller, + ) + + assert captured["extension"] == ".csv", ( + f"standardize_file_type received {captured['extension']!r}; expected single-dot '.csv'" + ) + assert captured["mime_type"] == "text/csv" + assert file.extension == ".csv" + + def test_extension_falls_back_to_bin_when_key_has_no_dot(self, monkeypatch: pytest.MonkeyPatch): + captured: dict = {} + + def fake_standardize(*, extension: str = "", mime_type: str = ""): + from graphon.file import 
FileType + + captured["extension"] = extension + return FileType.CUSTOM + + monkeypatch.setattr(builders, "standardize_file_type", fake_standardize) + + datasource_file = self._make_datasource_file(key="dotless-key", mime_type="application/octet-stream") + self._patch_session(monkeypatch, datasource_file) + + access_controller = MagicMock() + access_controller.apply_upload_file_filters = lambda stmt: stmt + + file = builders._build_from_datasource_file( + mapping={"datasource_file_id": "file-id", "transfer_method": "datasource_file"}, + tenant_id="tenant-id", + transfer_method=FileTransferMethod.DATASOURCE_FILE, + access_controller=access_controller, + ) + + assert captured["extension"] == ".bin" + assert file.extension == ".bin" diff --git a/api/tests/unit_tests/libs/_human_input/support.py b/api/tests/unit_tests/libs/_human_input/support.py index e6cc23161e..0f593507fd 100644 --- a/api/tests/unit_tests/libs/_human_input/support.py +++ b/api/tests/unit_tests/libs/_human_input/support.py @@ -4,7 +4,7 @@ from dataclasses import dataclass, field from datetime import datetime, timedelta from typing import Any -from graphon.nodes.human_input.entities import FormInput +from graphon.nodes.human_input.entities import FormInputConfig from graphon.nodes.human_input.enums import TimeoutUnit from libs.datetime_utils import naive_utc_now @@ -45,7 +45,7 @@ class HumanInputForm: tenant_id: str app_id: str | None form_content: str - inputs: list[FormInput] + inputs: list[FormInputConfig] user_actions: list[dict[str, Any]] timeout: int timeout_unit: TimeoutUnit @@ -88,7 +88,7 @@ class HumanInputForm: def to_response_dict(self, *, include_site_info: bool) -> dict[str, Any]: inputs_response = [ { - "type": form_input.type.name.lower().replace("_", "-"), + "type": form_input.type.value, "output_variable_name": form_input.output_variable_name, } for form_input in self.inputs diff --git a/api/tests/unit_tests/libs/_human_input/test_form_service.py 
b/api/tests/unit_tests/libs/_human_input/test_form_service.py index fa2c02020b..decd7c484b 100644 --- a/api/tests/unit_tests/libs/_human_input/test_form_service.py +++ b/api/tests/unit_tests/libs/_human_input/test_form_service.py @@ -7,11 +7,10 @@ from datetime import timedelta import pytest from graphon.nodes.human_input.entities import ( - FormInput, - UserAction, + ParagraphInputConfig, + UserActionConfig, ) from graphon.nodes.human_input.enums import ( - FormInputType, TimeoutUnit, ) from libs.datetime_utils import naive_utc_now @@ -50,8 +49,8 @@ class TestFormService: "tenant_id": "tenant-abc", "app_id": "app-def", "form_content": "# Test Form\n\nInput: {{#$output.input#}}", - "inputs": [FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="input", default=None)], - "user_actions": [UserAction(id="submit", title="Submit")], + "inputs": [ParagraphInputConfig(output_variable_name="input")], + "user_actions": [UserActionConfig(id="submit", title="Submit")], "timeout": 1, "timeout_unit": TimeoutUnit.HOUR, "form_token": "token-xyz", @@ -304,8 +303,8 @@ class TestFormValidation: "tenant_id": "tenant-abc", "app_id": "app-def", "form_content": "Test form", - "inputs": [FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="required_input", default=None)], - "user_actions": [UserAction(id="submit", title="Submit")], + "inputs": [ParagraphInputConfig(output_variable_name="required_input")], + "user_actions": [UserActionConfig(id="submit", title="Submit")], "timeout": 1, "timeout_unit": TimeoutUnit.HOUR, } diff --git a/api/tests/unit_tests/libs/_human_input/test_models.py b/api/tests/unit_tests/libs/_human_input/test_models.py index 866ee61b3e..f6e4c9ec18 100644 --- a/api/tests/unit_tests/libs/_human_input/test_models.py +++ b/api/tests/unit_tests/libs/_human_input/test_models.py @@ -7,11 +7,10 @@ from datetime import datetime, timedelta import pytest from graphon.nodes.human_input.entities import ( - FormInput, - UserAction, + ParagraphInputConfig, + 
UserActionConfig, ) from graphon.nodes.human_input.enums import ( - FormInputType, TimeoutUnit, ) from libs.datetime_utils import naive_utc_now @@ -32,8 +31,8 @@ class TestHumanInputForm: "tenant_id": "tenant-abc", "app_id": "app-def", "form_content": "# Test Form\n\nInput: {{#$output.input#}}", - "inputs": [FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="input", default=None)], - "user_actions": [UserAction(id="submit", title="Submit")], + "inputs": [ParagraphInputConfig(output_variable_name="input")], + "user_actions": [UserActionConfig(id="submit", title="Submit")], "timeout": 2, "timeout_unit": TimeoutUnit.HOUR, "form_token": "token-xyz", @@ -132,7 +131,7 @@ class TestHumanInputForm: assert "site" not in response assert response["form_content"] == "# Test Form\n\nInput: {{#$output.input#}}" assert len(response["inputs"]) == 1 - assert response["inputs"][0]["type"] == "text-input" + assert response["inputs"][0]["type"] == "paragraph" assert response["inputs"][0]["output_variable_name"] == "input" def test_form_to_response_dict_with_site_info(self, sample_form_data): diff --git a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py index 8bef01c1ed..7c7f20374e 100644 --- a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py +++ b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py @@ -673,7 +673,7 @@ class TestRedisShardedSubscription: """Test cases for the _RedisShardedSubscription class.""" @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture @@ -889,7 +889,9 @@ class TestRedisShardedSubscription: assert not sharded_subscription._queue.empty() assert sharded_subscription._queue.get_nowait() == 
b"test sharded payload" - def test_get_message_uses_target_node_for_cluster_client(self, mock_pubsub: MagicMock, monkeypatch): + def test_get_message_uses_target_node_for_cluster_client( + self, mock_pubsub: MagicMock, monkeypatch: pytest.MonkeyPatch + ): """Test that cluster clients use target_node for sharded messages.""" class DummyRedisCluster: @@ -1177,7 +1179,7 @@ class TestRedisSubscriptionCommon: return request.param @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture diff --git a/api/tests/unit_tests/libs/test_archive_storage.py b/api/tests/unit_tests/libs/test_archive_storage.py index de3c9c4737..4363c23571 100644 --- a/api/tests/unit_tests/libs/test_archive_storage.py +++ b/api/tests/unit_tests/libs/test_archive_storage.py @@ -34,7 +34,7 @@ def _client_error(code: str) -> ClientError: return ClientError({"Error": {"Code": code}}, "Operation") -def _mock_client(monkeypatch): +def _mock_client(monkeypatch: pytest.MonkeyPatch): client = MagicMock() client.head_bucket.return_value = None # Configure put_object to return a proper ETag that matches the MD5 hash @@ -56,19 +56,19 @@ def _mock_client(monkeypatch): return client, boto_client -def test_init_disabled(monkeypatch): +def test_init_disabled(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENABLED=False) with pytest.raises(ArchiveStorageNotConfiguredError, match="not enabled"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_missing_config(monkeypatch): +def test_init_missing_config(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENDPOINT=None) with pytest.raises(ArchiveStorageNotConfiguredError, match="incomplete"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_not_found(monkeypatch): +def 
test_init_bucket_not_found(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("404") @@ -77,7 +77,7 @@ def test_init_bucket_not_found(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_access_denied(monkeypatch): +def test_init_bucket_access_denied(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("403") @@ -86,7 +86,7 @@ def test_init_bucket_access_denied(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_other_error(monkeypatch): +def test_init_bucket_other_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("500") @@ -95,7 +95,7 @@ def test_init_bucket_other_error(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_sets_client(monkeypatch): +def test_init_sets_client(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, boto_client = _mock_client(monkeypatch) @@ -113,7 +113,7 @@ def test_init_sets_client(monkeypatch): assert storage.bucket == BUCKET_NAME -def test_put_object_returns_checksum(monkeypatch): +def test_put_object_returns_checksum(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -132,7 +132,7 @@ def test_put_object_returns_checksum(monkeypatch): assert checksum == expected_md5 -def test_put_object_raises_on_error(monkeypatch): +def test_put_object_raises_on_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -142,7 +142,7 @@ def test_put_object_raises_on_error(monkeypatch): storage.put_object("key", b"data") -def test_get_object_returns_bytes(monkeypatch): +def 
test_get_object_returns_bytes(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ -153,7 +153,7 @@ def test_get_object_returns_bytes(monkeypatch): assert storage.get_object("key") == b"payload" -def test_get_object_missing(monkeypatch): +def test_get_object_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -163,7 +163,7 @@ def test_get_object_missing(monkeypatch): storage.get_object("missing") -def test_get_object_stream(monkeypatch): +def test_get_object_stream(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ -174,7 +174,7 @@ def test_get_object_stream(monkeypatch): assert list(storage.get_object_stream("key")) == [b"a", b"b"] -def test_get_object_stream_missing(monkeypatch): +def test_get_object_stream_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -184,7 +184,7 @@ def test_get_object_stream_missing(monkeypatch): list(storage.get_object_stream("missing")) -def test_object_exists(monkeypatch): +def test_object_exists(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -194,7 +194,7 @@ def test_object_exists(monkeypatch): assert storage.object_exists("missing") is False -def test_delete_object_error(monkeypatch): +def test_delete_object_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.delete_object.side_effect = _client_error("500") @@ -204,7 +204,7 @@ def test_delete_object_error(monkeypatch): storage.delete_object("key") -def test_list_objects(monkeypatch): +def test_list_objects(monkeypatch: 
pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) paginator = MagicMock() @@ -219,7 +219,7 @@ def test_list_objects(monkeypatch): paginator.paginate.assert_called_once_with(Bucket="archive-bucket", Prefix="prefix") -def test_list_objects_error(monkeypatch): +def test_list_objects_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) paginator = MagicMock() @@ -231,7 +231,7 @@ def test_list_objects_error(monkeypatch): storage.list_objects("prefix") -def test_generate_presigned_url(monkeypatch): +def test_generate_presigned_url(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.return_value = "http://signed-url" @@ -247,7 +247,7 @@ def test_generate_presigned_url(monkeypatch): assert url == "http://signed-url" -def test_generate_presigned_url_error(monkeypatch): +def test_generate_presigned_url_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.side_effect = _client_error("500") diff --git a/api/tests/unit_tests/libs/test_pandas.py b/api/tests/unit_tests/libs/test_pandas.py index 21c2f0781d..a4739dbbc2 100644 --- a/api/tests/unit_tests/libs/test_pandas.py +++ b/api/tests/unit_tests/libs/test_pandas.py @@ -1,7 +1,8 @@ import pandas as pd +import pytest -def test_pandas_csv(tmp_path, monkeypatch): +def test_pandas_csv(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data) @@ -16,7 +17,7 @@ def test_pandas_csv(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx(tmp_path, monkeypatch): +def test_pandas_xlsx(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", 
"E"]} df1 = pd.DataFrame(data) @@ -31,7 +32,7 @@ def test_pandas_xlsx(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch): +def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data1 = {"col1": [1, 2, 3, 4, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data1) diff --git a/api/tests/unit_tests/libs/test_rate_limiter.py b/api/tests/unit_tests/libs/test_rate_limiter.py index 9d44b07b5e..5052033db8 100644 --- a/api/tests/unit_tests/libs/test_rate_limiter.py +++ b/api/tests/unit_tests/libs/test_rate_limiter.py @@ -1,5 +1,7 @@ from unittest.mock import MagicMock +import pytest + from libs import helper as helper_module @@ -31,7 +33,7 @@ class _FakeRedis: return True -def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): +def test_rate_limiter_counts_attempts_within_same_second(monkeypatch: pytest.MonkeyPatch): fake_redis = _FakeRedis() monkeypatch.setattr(helper_module.time, "time", lambda: 1000) @@ -48,7 +50,7 @@ def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): assert limiter.is_rate_limited("203.0.113.10") is True -def test_rate_limiter_uses_injected_redis(monkeypatch): +def test_rate_limiter_uses_injected_redis(monkeypatch: pytest.MonkeyPatch): redis_client = MagicMock() redis_client.zcard.return_value = 1 monkeypatch.setattr(helper_module.time, "time", lambda: 1000) diff --git a/api/tests/unit_tests/libs/test_token.py b/api/tests/unit_tests/libs/test_token.py index 6a65b5faa0..734568d37b 100644 --- a/api/tests/unit_tests/libs/test_token.py +++ b/api/tests/unit_tests/libs/test_token.py @@ -1,5 +1,6 @@ from unittest.mock import MagicMock +import pytest from werkzeug.wrappers import Response from constants import COOKIE_NAME_ACCESS_TOKEN, COOKIE_NAME_WEBAPP_ACCESS_TOKEN @@ -30,7 +31,7 @@ def test_extract_access_token(): assert extract_webapp_access_token(request) == expected_webapp # 
pyright: ignore[reportArgumentType] -def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): +def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", "", raising=False) @@ -38,7 +39,7 @@ def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): assert token._real_cookie_name("csrf_token") == "__Host-csrf_token" -def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): +def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) @@ -46,7 +47,7 @@ def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): assert token._real_cookie_name("csrf_token") == "csrf_token" -def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch): +def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) diff --git a/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py b/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py index ac4b087b91..8f27d0938d 100644 --- 
a/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py +++ b/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py @@ -3,7 +3,7 @@ from __future__ import annotations from datetime import UTC, datetime from types import SimpleNamespace -from graphon.nodes.human_input.entities import FormDefinition, FormInput, UserAction +from graphon.nodes.human_input.entities import FormDefinition, ParagraphInputConfig, UserActionConfig from graphon.nodes.human_input.enums import FormInputType from models.human_input import RecipientType from repositories.sqlalchemy_api_workflow_run_repository import _build_human_input_required_reason @@ -13,8 +13,8 @@ def _build_form_model() -> SimpleNamespace: expiration_time = datetime(2024, 1, 1, tzinfo=UTC) definition = FormDefinition( form_content="content", - inputs=[FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="name")], - user_actions=[UserAction(id="approve", title="Approve")], + inputs=[ParagraphInputConfig(type=FormInputType.PARAGRAPH, output_variable_name="name")], + user_actions=[UserActionConfig(id="approve", title="Approve")], rendered_content="rendered", expiration_time=expiration_time, default_values={"name": "Alice"}, diff --git a/api/tests/unit_tests/repositories/test_sqlalchemy_execution_extra_content_repository.py b/api/tests/unit_tests/repositories/test_sqlalchemy_execution_extra_content_repository.py new file mode 100644 index 0000000000..8547acc047 --- /dev/null +++ b/api/tests/unit_tests/repositories/test_sqlalchemy_execution_extra_content_repository.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +import json +from datetime import timedelta +from typing import cast + +from sqlalchemy.orm import Session, sessionmaker + +from graphon.nodes.human_input.entities import FormDefinition, UserActionConfig +from graphon.nodes.human_input.enums import HumanInputFormStatus +from libs.datetime_utils import naive_utc_now +from 
models.execution_extra_content import HumanInputContent as HumanInputContentModel +from models.human_input import HumanInputForm +from repositories.sqlalchemy_execution_extra_content_repository import SQLAlchemyExecutionExtraContentRepository + + +def test_map_human_input_content_populates_submission_data_from_stored_form_submission() -> None: + expiration_time = naive_utc_now() + timedelta(days=1) + stored_submission_data = {"decision": "approve", "comment": "Looks good"} + form_definition = FormDefinition( + form_content="content", + inputs=[], + user_actions=[UserActionConfig(id="approve", title="Approve")], + rendered_content="Rendered Approve", + expiration_time=expiration_time, + node_title="Approval", + display_in_ui=True, + ) + form = HumanInputForm( + tenant_id="tenant-1", + app_id="app-1", + workflow_run_id="workflow-run-1", + node_id="node-1", + form_definition=form_definition.model_dump_json(), + rendered_content="Rendered Approve", + expiration_time=expiration_time, + selected_action_id="approve", + submitted_data=json.dumps(stored_submission_data), + submitted_at=naive_utc_now(), + status=HumanInputFormStatus.SUBMITTED, + ) + form.id = "form-1" + model = HumanInputContentModel.new( + workflow_run_id="workflow-run-1", + form_id=form.id, + message_id="message-1", + ) + model.id = "content-1" + model.form = form + repository = SQLAlchemyExecutionExtraContentRepository(cast(sessionmaker[Session], object())) + + content = repository._map_human_input_content(model, {}) + + assert content is not None + assert content.form_submission_data is not None + assert content.form_submission_data.submitted_data == stored_submission_data diff --git a/api/tests/unit_tests/services/controller_api.py b/api/tests/unit_tests/services/controller_api.py index 762d7b9090..e7f7cabecd 100644 --- a/api/tests/unit_tests/services/controller_api.py +++ b/api/tests/unit_tests/services/controller_api.py @@ -146,7 +146,7 @@ class ControllerApiTestDataFactory: return app @staticmethod - 
def create_api_instance(app): + def create_api_instance(app: Flask): """ Create a Flask-RESTX API instance. @@ -160,7 +160,12 @@ class ControllerApiTestDataFactory: return api @staticmethod - def create_test_client(app, api, resource_class, route): + def create_test_client( + app: Flask, + api: Api, + resource_class: type, + route: str, + ): """ Create a Flask test client with a resource registered. @@ -302,7 +307,7 @@ class TestDatasetListApi: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, app): + def api(self, app: Flask): """ Create Flask-RESTX API instance. @@ -311,7 +316,7 @@ class TestDatasetListApi: return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """ Create test client with DatasetListApi registered. @@ -472,12 +477,12 @@ class TestDatasetApiGet: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client with DatasetApi registered.""" return ControllerApiTestDataFactory.create_test_client(app, api, DatasetApi, "/datasets/") @@ -588,12 +593,12 @@ class TestDatasetApiCreate: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client with DatasetApi registered.""" return ControllerApiTestDataFactory.create_test_client(app, api, DatasetApi, "/datasets") @@ -681,12 +686,12 @@ class TestHitTestingApi: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, 
app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client with HitTestingApi registered.""" return ControllerApiTestDataFactory.create_test_client( app, api, HitTestingApi, "/datasets//hit-testing" @@ -799,12 +804,12 @@ class TestExternalDatasetApi: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client_list(self, app, api): + def client_list(self, app: Flask, api: Api): """Create test client for external knowledge API list endpoint.""" return ControllerApiTestDataFactory.create_test_client( app, api, ExternalApiTemplateListApi, "/datasets/external-knowledge-api" diff --git a/api/tests/unit_tests/services/plugin/conftest.py b/api/tests/unit_tests/services/plugin/conftest.py index 80c6077b0c..9dc4fa0390 100644 --- a/api/tests/unit_tests/services/plugin/conftest.py +++ b/api/tests/unit_tests/services/plugin/conftest.py @@ -21,7 +21,7 @@ def make_features( @pytest.fixture -def mock_installer(monkeypatch): +def mock_installer(monkeypatch: pytest.MonkeyPatch): """Patch PluginInstaller at the service import site.""" mock = MagicMock() monkeypatch.setattr("services.plugin.plugin_service.PluginInstaller", lambda: mock) diff --git a/api/tests/unit_tests/services/plugin/test_plugin_permission_service.py b/api/tests/unit_tests/services/plugin/test_plugin_permission_service.py deleted file mode 100644 index 53a9e6210c..0000000000 --- a/api/tests/unit_tests/services/plugin/test_plugin_permission_service.py +++ /dev/null @@ -1,79 +0,0 @@ -from unittest.mock import MagicMock, patch - -from models.account import TenantPluginPermission - -MODULE = "services.plugin.plugin_permission_service" - - -def 
_patched_session(): - """Patch session_factory.create_session() to return a mock session as context manager.""" - session = MagicMock() - session.__enter__ = MagicMock(return_value=session) - session.__exit__ = MagicMock(return_value=False) - session.begin.return_value.__enter__ = MagicMock(return_value=session) - session.begin.return_value.__exit__ = MagicMock(return_value=False) - mock_factory = MagicMock() - mock_factory.create_session.return_value = session - patcher = patch(f"{MODULE}.session_factory", mock_factory) - return patcher, session - - -class TestGetPermission: - def test_returns_permission_when_found(self): - p1, session = _patched_session() - permission = MagicMock() - session.scalar.return_value = permission - - with p1: - from services.plugin.plugin_permission_service import PluginPermissionService - - result = PluginPermissionService.get_permission("t1") - - assert result is permission - - def test_returns_none_when_not_found(self): - p1, session = _patched_session() - session.scalar.return_value = None - - with p1: - from services.plugin.plugin_permission_service import PluginPermissionService - - result = PluginPermissionService.get_permission("t1") - - assert result is None - - -class TestChangePermission: - def test_creates_new_permission_when_not_exists(self): - p1, session = _patched_session() - session.scalar.return_value = None - - with p1, patch(f"{MODULE}.select"), patch(f"{MODULE}.TenantPluginPermission") as perm_cls: - perm_cls.return_value = MagicMock() - from services.plugin.plugin_permission_service import PluginPermissionService - - result = PluginPermissionService.change_permission( - "t1", TenantPluginPermission.InstallPermission.EVERYONE, TenantPluginPermission.DebugPermission.EVERYONE - ) - - assert result is True - session.begin.assert_called_once() - session.add.assert_called_once() - - def test_updates_existing_permission(self): - p1, session = _patched_session() - existing = MagicMock() - session.scalar.return_value = 
existing - - with p1: - from services.plugin.plugin_permission_service import PluginPermissionService - - result = PluginPermissionService.change_permission( - "t1", TenantPluginPermission.InstallPermission.ADMINS, TenantPluginPermission.DebugPermission.ADMINS - ) - - assert result is True - session.begin.assert_called_once() - assert existing.install_permission == TenantPluginPermission.InstallPermission.ADMINS - assert existing.debug_permission == TenantPluginPermission.DebugPermission.ADMINS - session.add.assert_not_called() diff --git a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py index 1a2d062208..287391c24c 100644 --- a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py +++ b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py @@ -2,12 +2,13 @@ from types import SimpleNamespace from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from services.rag_pipeline.rag_pipeline_task_proxy import RagPipelineTaskProxy @pytest.fixture -def proxy(mocker): +def proxy(mocker: MockerFixture): """Create a RagPipelineTaskProxy with mocked dependencies.""" mocker.patch("services.rag_pipeline.rag_pipeline_task_proxy.TenantIsolatedTaskQueue") entity = Mock() diff --git a/api/tests/unit_tests/services/recommend_app/test_category_order.py b/api/tests/unit_tests/services/recommend_app/test_category_order.py new file mode 100644 index 0000000000..3b94021f26 --- /dev/null +++ b/api/tests/unit_tests/services/recommend_app/test_category_order.py @@ -0,0 +1,26 @@ +import json +from unittest.mock import patch + +from services.recommend_app.category_order import get_explore_app_category_order, order_categories + + +@patch("services.recommend_app.category_order.redis_client.get") +def test_get_explore_app_category_order_returns_redis_list(mock_get): + mock_get.return_value = json.dumps(["C", "A", 
"B"]).encode() + + assert get_explore_app_category_order("en-US") == ["C", "A", "B"] + mock_get.assert_called_once_with("explore:apps:category_order:en-US") + + +@patch("services.recommend_app.category_order.redis_client.get") +def test_order_categories_uses_redis_order_as_source_of_truth(mock_get): + mock_get.return_value = json.dumps(["C", "A", "B"]).encode() + + assert order_categories({"A", "B", "C", "D"}, "en-US") == ["C", "A", "B"] + + +@patch("services.recommend_app.category_order.redis_client.get") +def test_order_categories_falls_back_to_sorted_categories_without_redis_order(mock_get): + mock_get.return_value = None + + assert order_categories({"B", "A", "C"}, "en-US") == ["A", "B", "C"] diff --git a/api/tests/unit_tests/services/test_app_generate_service.py b/api/tests/unit_tests/services/test_app_generate_service.py index d3f9c5dd9f..216c5d9db6 100644 --- a/api/tests/unit_tests/services/test_app_generate_service.py +++ b/api/tests/unit_tests/services/test_app_generate_service.py @@ -20,6 +20,7 @@ from contextlib import contextmanager from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import services.app_generate_service as ags_module from core.app.entities.app_invoke_entities import InvokeFrom @@ -96,7 +97,7 @@ def _noop_rate_limit_context(rate_limit, request_id): class TestBuildStreamingTaskOnSubscribe: """Tests for AppGenerateService._build_streaming_task_on_subscribe.""" - def test_streams_mode_starts_immediately(self, monkeypatch): + def test_streams_mode_starts_immediately(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") called = [] cb = AppGenerateService._build_streaming_task_on_subscribe(lambda: called.append(1)) @@ -106,7 +107,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] # not called again - def test_pubsub_mode_starts_on_subscribe(self, monkeypatch): + def test_pubsub_mode_starts_on_subscribe(self, 
monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) # large to prevent timer called = [] @@ -118,7 +119,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def test_sharded_mode_starts_on_subscribe(self, monkeypatch): + def test_sharded_mode_starts_on_subscribe(self, monkeypatch: pytest.MonkeyPatch): """sharded is treated like pubsub (i.e. not 'streams').""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) @@ -128,7 +129,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def test_pubsub_fallback_timer_fires(self, monkeypatch): + def test_pubsub_fallback_timer_fires(self, monkeypatch: pytest.MonkeyPatch): """When nobody subscribes fast enough the fallback timer fires.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 50) # 50 ms @@ -137,7 +138,7 @@ class TestBuildStreamingTaskOnSubscribe: time.sleep(0.2) # give the timer time to fire assert called == [1] - def test_exception_in_start_task_returns_false(self, monkeypatch): + def test_exception_in_start_task_returns_false(self, monkeypatch: pytest.MonkeyPatch): """When start_task raises, _try_start returns False and next call retries.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") call_count = 0 @@ -154,7 +155,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert call_count == 2 - def test_concurrent_subscribe_only_starts_once(self, monkeypatch): + def test_concurrent_subscribe_only_starts_once(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) call_count = 0 
@@ -176,31 +177,31 @@ class TestBuildStreamingTaskOnSubscribe: # _get_max_active_requests # --------------------------------------------------------------------------- class TestGetMaxActiveRequests: - def test_both_zero_returns_zero(self, monkeypatch): + def test_both_zero_returns_zero(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 0 - def test_app_limit_only(self, monkeypatch): + def test_app_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_config_limit_only(self, monkeypatch): + def test_config_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 10) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 10 - def test_both_non_zero_returns_min(self, monkeypatch): + def test_both_non_zero_returns_min(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 20) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_default_active_requests_used_when_app_has_none(self, monkeypatch): + def test_default_active_requests_used_when_app_has_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, 
"APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 15) app = _make_app(AppMode.CHAT, max_active_requests=0) @@ -214,7 +215,7 @@ class TestGenerate: """Tests for AppGenerateService.generate covering each mode.""" @pytest.fixture(autouse=True) - def _common(self, mocker, monkeypatch): + def _common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) # Prevent AppExecutionParams.new from touching real models via isinstance @@ -224,7 +225,7 @@ class TestGenerate: ) # -- COMPLETION --------------------------------------------------------- - def test_completion_mode(self, mocker): + def test_completion_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate", return_value={"result": "ok"}, @@ -244,7 +245,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via mode ------------------------------------------------ - def test_agent_chat_mode(self, mocker): + def test_agent_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent"}, @@ -264,7 +265,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via is_agent flag (non-AGENT_CHAT mode) ----------------- - def test_agent_via_is_agent_flag(self, mocker): + def test_agent_via_is_agent_flag(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent-via-flag"}, @@ -285,7 +286,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- CHAT --------------------------------------------------------------- - def test_chat_mode(self, mocker): + def test_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( 
"services.app_generate_service.ChatAppGenerator.generate", return_value={"result": "chat"}, @@ -306,7 +307,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- ADVANCED_CHAT blocking --------------------------------------------- - def test_advanced_chat_blocking(self, mocker): + def test_advanced_chat_blocking(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) @@ -333,7 +334,7 @@ class TestGenerate: retrieve_spy.assert_not_called() # -- ADVANCED_CHAT streaming -------------------------------------------- - def test_advanced_chat_streaming(self, mocker, monkeypatch): + def test_advanced_chat_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -365,7 +366,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- WORKFLOW blocking -------------------------------------------------- - def test_workflow_blocking(self, mocker): + def test_workflow_blocking(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -390,7 +391,7 @@ class TestGenerate: assert call_kwargs["pause_state_config"].state_owner_user_id == "owner-id" # -- WORKFLOW streaming ------------------------------------------------- - def test_workflow_streaming(self, mocker, monkeypatch): + def test_workflow_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -422,7 +423,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- Invalid mode ------------------------------------------------------- - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = 
_make_app("invalid-mode", is_agent=False) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate( @@ -439,14 +440,14 @@ class TestGenerate: # --------------------------------------------------------------------------- class TestGenerateBilling: @pytest.fixture(autouse=True) - def _common(self, mocker, monkeypatch): + def _common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) mocker.patch( "services.app_generate_service.rate_limit_context", _noop_rate_limit_context, ) - def test_billing_enabled_consumes_quota(self, mocker, monkeypatch): + def test_billing_enabled_consumes_quota(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() reserve_mock = mocker.patch( @@ -472,7 +473,9 @@ class TestGenerateBilling: reserve_mock.assert_called_once_with(QuotaType.WORKFLOW, "tenant-id") quota_charge.commit.assert_called_once() - def test_billing_quota_exceeded_raises_rate_limit_error(self, mocker, monkeypatch): + def test_billing_quota_exceeded_raises_rate_limit_error( + self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch + ): from services.errors.app import QuotaExceededError from services.errors.llm import InvokeRateLimitError @@ -491,7 +494,7 @@ class TestGenerateBilling: streaming=False, ) - def test_exception_refunds_quota_and_exits_rate_limit(self, mocker, monkeypatch): + def test_exception_refunds_quota_and_exits_rate_limit(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() mocker.patch( @@ -517,7 +520,9 @@ class TestGenerateBilling: ) quota_charge.refund.assert_called_once() - def test_rate_limit_exit_called_in_finally_for_blocking(self, mocker, monkeypatch): + def test_rate_limit_exit_called_in_finally_for_blocking( + self, mocker: 
MockerFixture, monkeypatch: pytest.MonkeyPatch + ): """For non-streaming (blocking) calls, rate_limit.exit should be called in finally.""" monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) @@ -552,7 +557,7 @@ class TestGenerateBilling: # _get_workflow # --------------------------------------------------------------------------- class TestGetWorkflow: - def test_debugger_fetches_draft(self, mocker): + def test_debugger_fetches_draft(self, mocker: MockerFixture): draft_wf = _make_workflow() ws = MagicMock() ws.get_draft_workflow.return_value = draft_wf @@ -562,7 +567,7 @@ class TestGetWorkflow: assert result is draft_wf ws.get_draft_workflow.assert_called_once() - def test_debugger_raises_when_no_draft(self, mocker): + def test_debugger_raises_when_no_draft(self, mocker: MockerFixture): ws = MagicMock() ws.get_draft_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -570,7 +575,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not initialized"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.DEBUGGER) - def test_non_debugger_fetches_published(self, mocker): + def test_non_debugger_fetches_published(self, mocker: MockerFixture): pub_wf = _make_workflow() ws = MagicMock() ws.get_published_workflow.return_value = pub_wf @@ -580,7 +585,7 @@ class TestGetWorkflow: assert result is pub_wf ws.get_published_workflow.assert_called_once() - def test_non_debugger_raises_when_no_published(self, mocker): + def test_non_debugger_raises_when_no_published(self, mocker: MockerFixture): ws = MagicMock() ws.get_published_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -588,7 +593,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not published"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API) - def 
test_specific_workflow_id_valid_uuid(self, mocker): + def test_specific_workflow_id_valid_uuid(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) specific_wf = _make_workflow(workflow_id=valid_uuid) ws = MagicMock() @@ -601,7 +606,7 @@ class TestGetWorkflow: assert result is specific_wf ws.get_published_workflow_by_id.assert_called_once() - def test_specific_workflow_id_invalid_uuid(self, mocker): + def test_specific_workflow_id_invalid_uuid(self, mocker: MockerFixture): ws = MagicMock() mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -610,7 +615,7 @@ class TestGetWorkflow: _make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API, workflow_id="not-a-uuid" ) - def test_specific_workflow_id_not_found(self, mocker): + def test_specific_workflow_id_not_found(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) ws = MagicMock() ws.get_published_workflow_by_id.return_value = None @@ -626,7 +631,7 @@ class TestGetWorkflow: # generate_single_iteration # --------------------------------------------------------------------------- class TestGenerateSingleIteration: - def test_advanced_chat_mode(self, mocker): + def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -644,7 +649,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "iteration"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -662,7 +667,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "wf-iteration"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.CHAT) with pytest.raises(ValueError, 
match="Invalid app mode"): AppGenerateService.generate_single_iteration(app_model=app, user=_make_user(), node_id="n1", args={}) @@ -672,7 +677,7 @@ class TestGenerateSingleIteration: # generate_single_loop # --------------------------------------------------------------------------- class TestGenerateSingleLoop: - def test_advanced_chat_mode(self, mocker): + def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -690,7 +695,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "loop"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -708,7 +713,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "wf-loop"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.COMPLETION) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate_single_loop(app_model=app, user=_make_user(), node_id="n1", args=MagicMock()) @@ -718,7 +723,7 @@ class TestGenerateSingleLoop: # generate_more_like_this # --------------------------------------------------------------------------- class TestGenerateMoreLikeThis: - def test_delegates_to_completion_generator(self, mocker): + def test_delegates_to_completion_generator(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate_more_like_this", return_value={"result": "similar"}, @@ -739,7 +744,7 @@ class TestGenerateMoreLikeThis: # get_response_generator # --------------------------------------------------------------------------- class TestGetResponseGenerator: - def test_non_ended_workflow_run(self, mocker): + 
def test_non_ended_workflow_run(self, mocker: MockerFixture): app = _make_app(AppMode.ADVANCED_CHAT) workflow_run = MagicMock() workflow_run.id = "run-1" @@ -756,7 +761,7 @@ class TestGetResponseGenerator: result = AppGenerateService.get_response_generator(app_model=app, workflow_run=workflow_run) gen_instance.retrieve_events.assert_called_once() - def test_ended_workflow_run_still_returns_generator(self, mocker): + def test_ended_workflow_run_still_returns_generator(self, mocker: MockerFixture): """Even when the run is ended, the current code still returns a generator (TODO branch).""" app = _make_app(AppMode.WORKFLOW) workflow_run = MagicMock() diff --git a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py index 30aa359b45..4293be8f72 100644 --- a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py +++ b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py @@ -89,7 +89,7 @@ class _FakeStreams: @pytest.fixture -def _patch_get_channel_streams(monkeypatch): +def _patch_get_channel_streams(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.streams_channel import StreamsBroadcastChannel fake = _FakeStreams() @@ -108,7 +108,7 @@ def _patch_get_channel_streams(monkeypatch): @pytest.fixture -def _patch_get_channel_pubsub(monkeypatch): +def _patch_get_channel_pubsub(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel store: dict[str, deque[bytes]] = defaultdict(deque) @@ -163,7 +163,7 @@ def test_streams_full_flow_prepublish_and_replay(): @pytest.mark.usefixtures("_patch_get_channel_pubsub") -def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch): +def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch: pytest.MonkeyPatch): # Speed up any potential timer if it accidentally triggers 
monkeypatch.setattr("services.app_generate_service.SSE_TASK_START_FALLBACK_MS", 50) diff --git a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py index 9a513c3fe6..f5879d973d 100644 --- a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py +++ b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py @@ -22,7 +22,7 @@ class FakeLock: @pytest.fixture -def fake_current_user(monkeypatch): +def fake_current_user(monkeypatch: pytest.MonkeyPatch): user = create_autospec(Account, instance=True) user.id = "user-1" user.current_tenant_id = "tenant-1" @@ -31,7 +31,7 @@ def fake_current_user(monkeypatch): @pytest.fixture -def fake_features(monkeypatch): +def fake_features(monkeypatch: pytest.MonkeyPatch): """Features.billing.enabled == False to skip quota logic.""" features = types.SimpleNamespace( billing=types.SimpleNamespace(enabled=False, subscription=types.SimpleNamespace(plan="ENTERPRISE")), @@ -45,7 +45,7 @@ def fake_features(monkeypatch): @pytest.fixture -def fake_lock(monkeypatch): +def fake_lock(monkeypatch: pytest.MonkeyPatch): """Patch redis_client.lock to always raise LockNotOwnedError on enter.""" def _fake_lock(name, timeout=None, *args, **kwargs): @@ -61,7 +61,7 @@ def fake_lock(monkeypatch): def test_save_document_with_dataset_id_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_features, fake_lock, @@ -118,7 +118,7 @@ def test_save_document_with_dataset_id_ignores_lock_not_owned( def test_add_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): @@ -161,7 +161,7 @@ def test_add_segment_ignores_lock_not_owned( def test_multi_create_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): diff --git a/api/tests/unit_tests/services/test_human_input_file_upload_service.py 
b/api/tests/unit_tests/services/test_human_input_file_upload_service.py new file mode 100644 index 0000000000..b6e429a46a --- /dev/null +++ b/api/tests/unit_tests/services/test_human_input_file_upload_service.py @@ -0,0 +1,294 @@ +from __future__ import annotations + +from datetime import timedelta +from types import SimpleNamespace +from unittest.mock import MagicMock + +import pytest +from sqlalchemy import create_engine, select +from sqlalchemy.orm import sessionmaker + +import models.account as account_module +import services.human_input_file_upload_service as service_module +from graphon.enums import WorkflowExecutionStatus +from graphon.nodes.human_input.enums import HumanInputFormKind, HumanInputFormStatus +from libs.datetime_utils import naive_utc_now +from models.account import Account, Tenant, TenantAccountJoin +from models.base import Base +from models.enums import CreatorUserRole, WorkflowRunTriggeredFrom +from models.human_input import ( + HumanInputForm, + HumanInputFormRecipient, + HumanInputFormUploadFile, + HumanInputFormUploadToken, +) +from models.model import App, AppMode, EndUser +from models.workflow import WorkflowRun, WorkflowType +from services.human_input_file_upload_service import HITL_UPLOAD_TOKEN_PREFIX, HumanInputFileUploadService +from services.human_input_service import FormSubmittedError + + +@pytest.fixture +def session_maker(monkeypatch: pytest.MonkeyPatch): + engine = create_engine("sqlite:///:memory:") + monkeypatch.setattr(account_module, "db", SimpleNamespace(engine=engine)) + Base.metadata.create_all( + engine, + tables=[ + Tenant.__table__, + Account.__table__, + TenantAccountJoin.__table__, + App.__table__, + EndUser.__table__, + WorkflowRun.__table__, + HumanInputForm.__table__, + HumanInputFormRecipient.__table__, + HumanInputFormUploadToken.__table__, + HumanInputFormUploadFile.__table__, + ], + ) + try: + yield sessionmaker(bind=engine, expire_on_commit=False) + finally: + Base.metadata.drop_all( + engine, + tables=[ + 
HumanInputFormUploadFile.__table__, + HumanInputFormUploadToken.__table__, + HumanInputFormRecipient.__table__, + HumanInputForm.__table__, + WorkflowRun.__table__, + EndUser.__table__, + App.__table__, + TenantAccountJoin.__table__, + Account.__table__, + Tenant.__table__, + ], + ) + engine.dispose() + + +def _create_waiting_form( + session_maker, + *, + created_by_role: CreatorUserRole = CreatorUserRole.ACCOUNT, + form_kind: HumanInputFormKind = HumanInputFormKind.RUNTIME, +) -> tuple[str, str, str]: + form_id = "00000000-0000-0000-0000-000000000001" + recipient_id = "00000000-0000-0000-0000-000000000002" + workflow_run_id = None + if form_kind == HumanInputFormKind.RUNTIME: + workflow_run_id = "00000000-0000-0000-0000-000000000012" + tenant_id = "00000000-0000-0000-0000-000000000010" + app_id = "00000000-0000-0000-0000-000000000011" + now = naive_utc_now() + created_by = ( + "00000000-0000-0000-0000-000000000020" + if created_by_role == CreatorUserRole.ACCOUNT + else "00000000-0000-0000-0000-000000000021" + ) + with session_maker.begin() as session: + tenant = Tenant(name="tenant-1") + tenant.id = tenant_id + session.add(tenant) + if created_by_role == CreatorUserRole.ACCOUNT: + account = Account(name="owner", email="owner@example.com") + account.id = created_by + session.add(account) + session.add( + TenantAccountJoin( + tenant_id=tenant_id, + account_id=created_by, + current=True, + ) + ) + app_creator = created_by + else: + end_user = EndUser( + tenant_id=tenant_id, + app_id=app_id, + type="web_app", + is_anonymous=False, + session_id="session-1", + external_user_id="external-1", + ) + end_user.id = created_by + session.add(end_user) + app_creator = "00000000-0000-0000-0000-000000000020" + account = Account(name="owner", email="owner@example.com") + account.id = app_creator + session.add(account) + session.add( + TenantAccountJoin( + tenant_id=tenant_id, + account_id=app_creator, + current=True, + ) + ) + app = App( + tenant_id=tenant_id, + name="app-1", + 
description="", + mode=AppMode.WORKFLOW, + icon_type="emoji", + icon="app", + icon_background="#ffffff", + enable_site=True, + enable_api=True, + created_by=app_creator, + updated_by=app_creator, + ) + app.id = app_id + session.add(app) + if workflow_run_id is not None: + workflow_run = WorkflowRun( + tenant_id=tenant_id, + app_id=app_id, + workflow_id="00000000-0000-0000-0000-000000000013", + type=WorkflowType.WORKFLOW, + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + version="1", + graph="{}", + inputs="{}", + status=WorkflowExecutionStatus.RUNNING, + created_by_role=created_by_role, + created_by=created_by, + created_at=now, + ) + workflow_run.id = workflow_run_id + session.add(workflow_run) + session.add( + HumanInputForm( + id=form_id, + tenant_id=tenant_id, + app_id=app_id, + workflow_run_id=workflow_run_id, + form_kind=form_kind, + node_id="node-1", + form_definition="{}", + rendered_content="content", + expiration_time=now + timedelta(hours=1), + created_at=now, + ) + ) + session.add( + HumanInputFormRecipient( + id=recipient_id, + form_id=form_id, + delivery_id="00000000-0000-0000-0000-000000000003", + recipient_type="standalone_web_app", + recipient_payload='{"TYPE": "standalone_web_app"}', + access_token="form-token-1", + ) + ) + return form_id, recipient_id, created_by + + +def test_issue_upload_token_persists_token_without_technical_end_user( + monkeypatch: pytest.MonkeyPatch, + session_maker, +) -> None: + form_id, recipient_id, _created_by = _create_waiting_form(session_maker) + monkeypatch.setattr(service_module.secrets, "token_urlsafe", lambda _bytes: "random-value") + + token = HumanInputFileUploadService(session_maker).issue_upload_token("form-token-1") + + assert token.upload_token == f"{HITL_UPLOAD_TOKEN_PREFIX}random-value" + with session_maker() as session: + token_model = session.scalar(select(HumanInputFormUploadToken)) + assert token_model is not None + assert token_model.form_id == form_id + assert token_model.recipient_id == 
recipient_id + assert token_model.token == token.upload_token + assert session.scalar(select(EndUser).where(EndUser.type == "human-input")) is None + + +def test_validate_upload_token_returns_account_owner_and_record_file_link(session_maker) -> None: + form_id, recipient_id, created_by = _create_waiting_form(session_maker, created_by_role=CreatorUserRole.ACCOUNT) + token = HumanInputFileUploadService(session_maker).issue_upload_token("form-token-1") + workflow_run_repository = MagicMock() + workflow_run_repository.get_workflow_run_by_id.return_value = SimpleNamespace( + tenant_id="00000000-0000-0000-0000-000000000010", + app_id="00000000-0000-0000-0000-000000000011", + created_by_role=CreatorUserRole.ACCOUNT, + created_by=created_by, + ) + + context = HumanInputFileUploadService( + session_maker, + workflow_run_repository=workflow_run_repository, + ).validate_upload_token(token.upload_token) + assert context.form_id == form_id + assert context.recipient_id == recipient_id + assert isinstance(context.owner, Account) + assert context.owner.id == created_by + assert context.owner.current_tenant_id == "00000000-0000-0000-0000-000000000010" + workflow_run_repository.get_workflow_run_by_id.assert_called_once_with( + tenant_id="00000000-0000-0000-0000-000000000010", + app_id="00000000-0000-0000-0000-000000000011", + run_id="00000000-0000-0000-0000-000000000012", + ) + + HumanInputFileUploadService(session_maker).record_upload_file( + context=context, + file_id="00000000-0000-0000-0000-000000000099", + ) + + with session_maker() as session: + link = session.scalar(select(HumanInputFormUploadFile)) + assert link is not None + assert link.tenant_id == context.tenant_id + assert link.app_id == context.app_id + assert link.form_id == form_id + assert link.upload_token_id == context.upload_token_id + + +def test_validate_upload_token_returns_end_user_owner(session_maker) -> None: + form_id, recipient_id, created_by = _create_waiting_form(session_maker, 
created_by_role=CreatorUserRole.END_USER) + token = HumanInputFileUploadService(session_maker).issue_upload_token("form-token-1") + workflow_run_repository = MagicMock() + workflow_run_repository.get_workflow_run_by_id.return_value = SimpleNamespace( + tenant_id="00000000-0000-0000-0000-000000000010", + app_id="00000000-0000-0000-0000-000000000011", + created_by_role=CreatorUserRole.END_USER, + created_by=created_by, + ) + + context = HumanInputFileUploadService( + session_maker, + workflow_run_repository=workflow_run_repository, + ).validate_upload_token(token.upload_token) + + assert context.form_id == form_id + assert context.recipient_id == recipient_id + assert isinstance(context.owner, EndUser) + assert context.owner.id == created_by + + +def test_validate_upload_token_allows_delivery_test_form(session_maker) -> None: + form_id, recipient_id, _created_by = _create_waiting_form( + session_maker, + form_kind=HumanInputFormKind.DELIVERY_TEST, + ) + token = HumanInputFileUploadService(session_maker).issue_upload_token("form-token-1") + + context = HumanInputFileUploadService(session_maker).validate_upload_token(token.upload_token) + + assert context.form_id == form_id + assert context.recipient_id == recipient_id + assert isinstance(context.owner, Account) + assert context.owner.id == "00000000-0000-0000-0000-000000000020" + assert context.owner.current_tenant_id == "00000000-0000-0000-0000-000000000010" + + +def test_validate_upload_token_rejects_submitted_form(session_maker) -> None: + form_id, _recipient_id, _created_by = _create_waiting_form(session_maker) + token = HumanInputFileUploadService(session_maker).issue_upload_token("form-token-1") + with session_maker.begin() as session: + form = session.get(HumanInputForm, form_id) + assert form is not None + form.status = HumanInputFormStatus.SUBMITTED + form.submitted_at = naive_utc_now() + + with pytest.raises(FormSubmittedError): + 
HumanInputFileUploadService(session_maker).validate_upload_token(token.upload_token) diff --git a/api/tests/unit_tests/services/test_human_input_service.py b/api/tests/unit_tests/services/test_human_input_service.py index 55af564821..8e5293a3c5 100644 --- a/api/tests/unit_tests/services/test_human_input_service.py +++ b/api/tests/unit_tests/services/test_human_input_service.py @@ -3,18 +3,24 @@ from datetime import datetime, timedelta from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import services.human_input_service as human_input_service_module from core.repositories.human_input_repository import ( HumanInputFormRecord, HumanInputFormSubmissionRepository, ) +from graphon.file import File, FileTransferMethod, FileType from graphon.nodes.human_input.entities import ( + FileInputConfig, + FileListInputConfig, FormDefinition, - FormInput, - UserAction, + ParagraphInputConfig, + SelectInputConfig, + StringListSource, + UserActionConfig, ) -from graphon.nodes.human_input.enums import FormInputType, HumanInputFormKind, HumanInputFormStatus +from graphon.nodes.human_input.enums import HumanInputFormKind, HumanInputFormStatus, ValueSourceType from libs.datetime_utils import naive_utc_now from models.human_input import RecipientType from services.human_input_service import ( @@ -50,7 +56,7 @@ def sample_form_record(): definition=FormDefinition( form_content="hello", inputs=[], - user_actions=[UserAction(id="submit", title="Submit")], + user_actions=[UserActionConfig(id="submit", title="Submit")], rendered_content="

hello

", expiration_time=naive_utc_now() + timedelta(hours=1), ), @@ -177,7 +183,9 @@ def test_get_form_definition_by_token_for_console_uses_repository(sample_form_re assert form.get_definition() == console_record.definition -def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_calls_repository_and_enqueue( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -204,7 +212,9 @@ def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, m enqueue_spy.assert_called_once_with(sample_form_record.workflow_run_id) -def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_skips_enqueue_for_delivery_test( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) test_record = dataclasses.replace( @@ -227,7 +237,9 @@ def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record enqueue_spy.assert_not_called() -def test_submit_form_by_token_passes_submission_user_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_passes_submission_user_id( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -273,7 +285,7 @@ def test_submit_form_by_token_missing_inputs(sample_form_record, mock_session_fa definition_with_input = FormDefinition( form_content="hello", - inputs=[FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="content")], + 
inputs=[ParagraphInputConfig(output_variable_name="content")], user_actions=sample_form_record.definition.user_actions, rendered_content="

hello

", expiration_time=sample_form_record.expiration_time, @@ -294,6 +306,157 @@ def test_submit_form_by_token_missing_inputs(sample_form_record, mock_session_fa repo.mark_submitted.assert_not_called() +def test_validate_human_input_submission_accepts_select_file_and_file_list(mock_session_factory): + session_factory, _ = mock_session_factory + service = HumanInputService(session_factory) + definition = FormDefinition.model_validate( + { + "form_content": "Pick one and upload files", + "inputs": [ + { + "type": "select", + "output_variable_name": "decision", + "option_source": { + "type": "constant", + "value": ["approve", "reject"], + }, + }, + { + "type": "file", + "output_variable_name": "attachment", + "allowed_file_types": ["document"], + "allowed_file_upload_methods": ["remote_url"], + }, + { + "type": "file-list", + "output_variable_name": "attachments", + "allowed_file_types": ["document"], + "allowed_file_upload_methods": ["remote_url"], + "number_limits": 3, + }, + ], + "user_actions": [{"id": "submit", "title": "Submit"}], + "rendered_content": "

Pick one and upload files

", + "expiration_time": naive_utc_now() + timedelta(hours=1), + } + ) + + service.validate_human_input_submission( + form_definition=definition, + selected_action_id="submit", + form_data={ + "decision": "approve", + "attachment": { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/file.txt", + "filename": "file.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + "attachments": [ + { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/first.txt", + "filename": "first.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/second.txt", + "filename": "second.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + ], + }, + ) + + +@pytest.mark.parametrize( + ("input_definition", "submitted_value", "expected_message"), + [ + ( + { + "type": "select", + "output_variable_name": "decision", + "option_source": { + "type": "constant", + "value": ["approve", "reject"], + }, + }, + "unknown", + "decision", + ), + ( + { + "type": "file", + "output_variable_name": "attachment", + "allowed_file_types": ["document"], + "allowed_file_upload_methods": ["remote_url"], + }, + "not-a-file", + "attachment", + ), + ( + { + "type": "file-list", + "output_variable_name": "attachments", + "allowed_file_types": ["document"], + "allowed_file_upload_methods": ["remote_url"], + "number_limits": 2, + }, + [ + { + "type": "document", + "transfer_method": "remote_url", + "remote_url": "https://example.com/ok.txt", + "filename": "ok.txt", + "extension": ".txt", + "mime_type": "text/plain", + }, + "not-a-file", + ], + "attachments", + ), + ], +) +def test_validate_human_input_submission_rejects_invalid_select_and_file_payloads( + sample_form_record, + mock_session_factory, + input_definition, + submitted_value, + expected_message, +): + session_factory, _ = mock_session_factory + repo = 
MagicMock(spec=HumanInputFormSubmissionRepository) + definition = FormDefinition.model_validate( + { + "form_content": "Validate form data", + "inputs": [input_definition], + "user_actions": [{"id": "submit", "title": "Submit"}], + "rendered_content": "

Validate form data

", + "expiration_time": naive_utc_now() + timedelta(hours=1), + } + ) + repo.get_by_token.return_value = dataclasses.replace(sample_form_record, definition=definition) + service = HumanInputService(session_factory, form_repository=repo) + + with pytest.raises(InvalidFormDataError) as exc_info: + service.submit_form_by_token( + recipient_type=RecipientType.STANDALONE_WEB_APP, + form_token="token", + selected_action_id="submit", + form_data={input_definition["output_variable_name"]: submitted_value}, + ) + + assert expected_message in str(exc_info.value) + repo.mark_submitted.assert_not_called() + + def test_form_properties(sample_form_record): form = Form(sample_form_record) assert form.id == "form-id" @@ -314,7 +477,7 @@ def test_form_submitted_error_init(): assert error.code == 412 -def test_human_input_service_init_with_engine(mocker): +def test_human_input_service_init_with_engine(mocker: MockerFixture): engine = MagicMock(spec=human_input_service_module.Engine) sessionmaker_mock = mocker.patch("services.human_input_service.sessionmaker") @@ -371,7 +534,7 @@ def test_submit_form_by_token_delivery_not_enabled(mock_session_factory): service.submit_form_by_token(RecipientType.STANDALONE_WEB_APP, "token", "action", {}) -def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker: MockerFixture): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -461,3 +624,203 @@ def test_is_globally_expired_zero_timeout(monkeypatch, sample_form_record, mock_ monkeypatch.setattr(human_input_service_module.dify_config, "HUMAN_INPUT_GLOBAL_TIMEOUT_SECONDS", 0) assert service._is_globally_expired(Form(sample_form_record)) is False + + +def test_submit_form_by_token_normalizes_select_and_files(sample_form_record, mock_session_factory, mocker) -> 
None: + session_factory, _ = mock_session_factory + repo = MagicMock(spec=HumanInputFormSubmissionRepository) + definition = FormDefinition( + form_content="hello", + inputs=[ + SelectInputConfig( + output_variable_name="decision", + option_source=StringListSource(type=ValueSourceType.CONSTANT, value=["approve", "reject"]), + ), + FileInputConfig(output_variable_name="attachment"), + FileListInputConfig(output_variable_name="attachments", number_limits=3), + ], + user_actions=[UserActionConfig(id="submit", title="Submit")], + rendered_content="

hello

", + expiration_time=sample_form_record.expiration_time, + ) + form_with_inputs = dataclasses.replace(sample_form_record, definition=definition) + repo.get_by_token.return_value = form_with_inputs + repo.mark_submitted.return_value = form_with_inputs + service = HumanInputService(session_factory, form_repository=repo) + + single_file = File( + file_id="file-1", + file_type=FileType.DOCUMENT, + transfer_method=FileTransferMethod.LOCAL_FILE, + related_id="upload-1", + filename="resume.pdf", + extension=".pdf", + mime_type="application/pdf", + size=128, + ) + list_files = [ + File( + file_id="file-2", + file_type=FileType.DOCUMENT, + transfer_method=FileTransferMethod.LOCAL_FILE, + related_id="upload-2", + filename="a.pdf", + extension=".pdf", + mime_type="application/pdf", + size=64, + ), + File( + file_id="file-3", + file_type=FileType.DOCUMENT, + transfer_method=FileTransferMethod.REMOTE_URL, + remote_url="https://example.com/b.pdf", + filename="b.pdf", + extension=".pdf", + mime_type="application/pdf", + size=96, + ), + ] + mocker.patch("services.human_input_service.build_from_mapping", return_value=single_file) + mocker.patch("services.human_input_service.build_from_mappings", return_value=list_files) + enqueue_spy = mocker.patch.object(service, "enqueue_resume") + + service.submit_form_by_token( + recipient_type=RecipientType.STANDALONE_WEB_APP, + form_token="token", + selected_action_id="submit", + form_data={ + "decision": "approve", + "attachment": {"transfer_method": "local_file", "upload_file_id": "upload-1", "type": "document"}, + "attachments": [ + {"transfer_method": "local_file", "upload_file_id": "upload-2", "type": "document"}, + {"transfer_method": "remote_url", "url": "https://example.com/b.pdf", "type": "document"}, + ], + }, + ) + + submitted_data = repo.mark_submitted.call_args.kwargs["form_data"] + assert submitted_data["decision"] == "approve" + assert submitted_data["attachment"]["filename"] == "resume.pdf" + assert 
submitted_data["attachment"]["transfer_method"] == "local_file" + assert submitted_data["attachments"][0]["filename"] == "a.pdf" + assert submitted_data["attachments"][1]["filename"] == "b.pdf" + enqueue_spy.assert_called_once_with(sample_form_record.workflow_run_id) + + +def test_submit_form_by_token_invalid_select_value(sample_form_record, mock_session_factory) -> None: + session_factory, _ = mock_session_factory + repo = MagicMock(spec=HumanInputFormSubmissionRepository) + definition = FormDefinition( + form_content="hello", + inputs=[ + SelectInputConfig( + output_variable_name="decision", + option_source=StringListSource(type=ValueSourceType.CONSTANT, value=["approve", "reject"]), + ) + ], + user_actions=[UserActionConfig(id="submit", title="Submit")], + rendered_content="

hello

", + expiration_time=sample_form_record.expiration_time, + ) + repo.get_by_token.return_value = dataclasses.replace(sample_form_record, definition=definition) + service = HumanInputService(session_factory, form_repository=repo) + + with pytest.raises(InvalidFormDataError, match="Invalid value for select input 'decision'"): + service.submit_form_by_token( + recipient_type=RecipientType.STANDALONE_WEB_APP, + form_token="token", + selected_action_id="submit", + form_data={"decision": "hold"}, + ) + + +def test_submit_form_by_token_invalid_file_list_item(sample_form_record, mock_session_factory) -> None: + session_factory, _ = mock_session_factory + repo = MagicMock(spec=HumanInputFormSubmissionRepository) + definition = FormDefinition( + form_content="hello", + inputs=[FileListInputConfig(output_variable_name="attachments", number_limits=2)], + user_actions=[UserActionConfig(id="submit", title="Submit")], + rendered_content="

hello

", + expiration_time=sample_form_record.expiration_time, + ) + repo.get_by_token.return_value = dataclasses.replace(sample_form_record, definition=definition) + service = HumanInputService(session_factory, form_repository=repo) + + with pytest.raises( + InvalidFormDataError, + match="Invalid value for file list input 'attachments': expected list of mappings", + ): + service.submit_form_by_token( + recipient_type=RecipientType.STANDALONE_WEB_APP, + form_token="token", + selected_action_id="submit", + form_data={"attachments": ["not-a-file"]}, + ) + + +def test_submit_form_by_token_rejects_cross_tenant_file(sample_form_record, mock_session_factory, mocker) -> None: + session_factory, _ = mock_session_factory + repo = MagicMock(spec=HumanInputFormSubmissionRepository) + definition = FormDefinition( + form_content="hello", + inputs=[FileInputConfig(output_variable_name="attachment")], + user_actions=[UserActionConfig(id="submit", title="Submit")], + rendered_content="

hello

", + expiration_time=sample_form_record.expiration_time, + ) + repo.get_by_token.return_value = dataclasses.replace(sample_form_record, definition=definition) + service = HumanInputService(session_factory, form_repository=repo) + mocker.patch("services.human_input_service.build_from_mapping", side_effect=ValueError("Invalid upload file")) + + with pytest.raises(InvalidFormDataError, match="Invalid value for file input 'attachment': Invalid upload file"): + service.submit_form_by_token( + recipient_type=RecipientType.STANDALONE_WEB_APP, + form_token="token", + selected_action_id="submit", + form_data={ + "attachment": { + "transfer_method": "local_file", + "upload_file_id": "4e0d1b87-52f2-49f6-b8c6-95cd9c954b3e", + "type": "document", + } + }, + ) + + repo.mark_submitted.assert_not_called() + + +def test_submit_form_by_token_rejects_cross_tenant_file_list(sample_form_record, mock_session_factory, mocker) -> None: + session_factory, _ = mock_session_factory + repo = MagicMock(spec=HumanInputFormSubmissionRepository) + definition = FormDefinition( + form_content="hello", + inputs=[FileListInputConfig(output_variable_name="attachments", number_limits=2)], + user_actions=[UserActionConfig(id="submit", title="Submit")], + rendered_content="

hello

", + expiration_time=sample_form_record.expiration_time, + ) + repo.get_by_token.return_value = dataclasses.replace(sample_form_record, definition=definition) + service = HumanInputService(session_factory, form_repository=repo) + mocker.patch("services.human_input_service.build_from_mappings", side_effect=ValueError("Invalid upload file")) + + with pytest.raises( + InvalidFormDataError, + match="Invalid value for file list input 'attachments': Invalid upload file", + ): + service.submit_form_by_token( + recipient_type=RecipientType.STANDALONE_WEB_APP, + form_token="token", + selected_action_id="submit", + form_data={ + "attachments": [ + { + "transfer_method": "local_file", + "upload_file_id": "4e0d1b87-52f2-49f6-b8c6-95cd9c954b3e", + "type": "document", + } + ] + }, + ) + + repo.mark_submitted.assert_not_called() diff --git a/api/tests/unit_tests/services/test_message_service.py b/api/tests/unit_tests/services/test_message_service.py index 7adc15d63e..51f8b3ef5b 100644 --- a/api/tests/unit_tests/services/test_message_service.py +++ b/api/tests/unit_tests/services/test_message_service.py @@ -906,7 +906,7 @@ class TestMessageServiceSuggestedQuestions: ): """Test successful suggested questions generation in basic Chat mode.""" # Arrange - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) user = factory.create_end_user_mock() message = factory.create_message_mock() mock_get_message.return_value = message @@ -953,7 +953,7 @@ class TestMessageServiceSuggestedQuestions: """Test suggested question generation uses frontend configured model and prompt.""" from core.app.entities.app_invoke_entities import InvokeFrom - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() @@ -1024,7 +1024,7 @@ class TestMessageServiceSuggestedQuestions: factory, ): """Test invalid 
frontend configured model falls back to tenant default model.""" - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() diff --git a/api/tests/unit_tests/services/test_model_load_balancing_service.py b/api/tests/unit_tests/services/test_model_load_balancing_service.py index 3119af40a2..beecf73caa 100644 --- a/api/tests/unit_tests/services/test_model_load_balancing_service.py +++ b/api/tests/unit_tests/services/test_model_load_balancing_service.py @@ -104,7 +104,7 @@ def test_enable_disable_model_load_balancing_should_call_provider_configuration_ service.provider_manager.get_configurations.return_value = {"openai": provider_configuration} # Act - getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) # Assert getattr(provider_configuration, expected_provider_method).assert_called_once_with( @@ -125,7 +125,7 @@ def test_enable_disable_model_load_balancing_should_raise_value_error_when_provi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_raise_value_error_when_provider_missing( @@ -136,7 +136,7 @@ def test_get_load_balancing_configs_should_raise_value_error_when_provider_missi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_for_custom_provider( @@ 
-177,7 +177,7 @@ def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_fo "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, ) # Assert @@ -238,7 +238,7 @@ def test_get_load_balancing_configs_should_reorder_existing_inherit_and_tolerate "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, config_from="predefined-model", ) @@ -259,7 +259,7 @@ def test_get_load_balancing_config_should_raise_value_error_when_provider_missin # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") def test_get_load_balancing_config_should_return_none_when_config_not_found( @@ -272,7 +272,7 @@ def test_get_load_balancing_config_should_return_none_when_config_not_found( mock_db.session.scalar.return_value = None # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result is None @@ -292,7 +292,7 @@ def test_get_load_balancing_config_should_return_obfuscated_payload_when_config_ mock_db.session.scalar.return_value = config # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result == { @@ -335,7 +335,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_provider_mi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [], "custom-model", ) @@ -354,7 +354,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_configs_is_ "tenant-1", "openai", "gpt-4o-mini", - 
ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], "invalid-configs"), "custom-model", ) @@ -375,7 +375,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_config_item "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], ["bad-item"]), "custom-model", ) @@ -397,7 +397,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credential_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -418,7 +418,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"enabled": True}], "custom-model", ) @@ -428,7 +428,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "cfg-without-enabled"}], "custom-model", ) @@ -450,7 +450,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_existing_co "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-2", "name": "invalid", "enabled": True}], "custom-model", ) @@ -472,7 +472,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-1", "name": "new", "enabled": True, "credentials": "bad"}], "custom-model", ) @@ -482,7 +482,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new-config", "enabled": True, "credentials": "bad"}], "custom-model", ) @@ -519,7 +519,7 @@ def test_update_load_balancing_configs_should_update_existing_create_new_and_del "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + 
ModelType.LLM, [ {"id": "cfg-1", "name": "updated-name", "enabled": False, "credentials": {"api_key": "plain"}}, {"name": "new-config", "enabled": True, "credentials": {"api_key": "plain"}}, @@ -553,7 +553,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "__inherit__", "enabled": True, "credentials": {"api_key": "x"}}], "custom-model", ) @@ -563,7 +563,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new", "enabled": True}], "custom-model", ) @@ -585,7 +585,7 @@ def test_update_load_balancing_configs_should_create_from_existing_provider_cred "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -611,7 +611,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_provi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) @@ -631,7 +631,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_confi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, config_id="cfg-1", ) @@ -654,7 +654,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, config_id="cfg-1", ) @@ -662,7 +662,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) diff --git a/api/tests/unit_tests/services/test_model_provider_service.py b/api/tests/unit_tests/services/test_model_provider_service.py index 28d459eac9..9e4eeb2d6e 100644 --- 
a/api/tests/unit_tests/services/test_model_provider_service.py +++ b/api/tests/unit_tests/services/test_model_provider_service.py @@ -90,7 +90,7 @@ class TestModelProviderServiceConfiguration: ) manager.get_configurations.return_value = {"openai": allowed, "embedding": filtered} - result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM) assert len(result) == 1 assert result[0].provider == "openai" @@ -232,7 +232,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -245,7 +245,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, }, @@ -258,7 +258,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_name": "cred-a", @@ -277,7 +277,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_id": "cred-1", @@ -298,7 +298,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -311,7 +311,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -324,7 +324,7 @@ class 
TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -337,7 +337,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", }, "delete_custom_model", @@ -425,7 +425,7 @@ class TestModelProviderServiceListingsAndDefaults: provider_configurations = SimpleNamespace(get_models=MagicMock(return_value=models)) manager.get_configurations.return_value = provider_configurations - result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) provider_configurations.get_models.assert_called_once_with(model_type=ModelType.LLM, only_active=True) assert len(result) == 1 @@ -495,7 +495,7 @@ class TestModelProviderServiceListingsAndDefaults: ), ) - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is not None assert result.model == "gpt-4o" @@ -506,7 +506,7 @@ class TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.return_value = None - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -514,7 +514,7 @@ class TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.side_effect = RuntimeError("boom") - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + 
result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -523,7 +523,7 @@ class TestModelProviderServiceListingsAndDefaults: service.update_default_model_of_model_type( tenant_id="tenant-1", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, provider="openai", model="gpt-4o", ) @@ -593,7 +593,7 @@ class TestModelProviderServiceListingsAndDefaults: tenant_id="tenant-1", provider="openai", model="gpt-4o", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, ) getattr(provider_configuration, provider_method_name).assert_called_once_with( diff --git a/api/tests/unit_tests/services/test_trigger_provider_service.py b/api/tests/unit_tests/services/test_trigger_provider_service.py index 6eba60e5f1..4da4af2d93 100644 --- a/api/tests/unit_tests/services/test_trigger_provider_service.py +++ b/api/tests/unit_tests/services/test_trigger_provider_service.py @@ -325,7 +325,7 @@ def test_update_trigger_subscription_should_raise_error_when_name_conflicts( id="sub-1", name="old", provider_id="langgenius/github/github", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.side_effect = [subscription, object()] # found sub, name conflict _mock_get_trigger_provider(mocker, provider_controller) @@ -350,7 +350,7 @@ def test_update_trigger_subscription_should_update_fields_and_clear_cache( properties={"project": "enc-old"}, parameters={"event": "old"}, credentials={"api_key": "enc-old"}, - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credential_expires_at=0, expires_at=0, ) @@ -456,7 +456,7 @@ def test_delete_trigger_provider_should_delete_and_clear_cache_even_if_unsubscri id="sub-1", user_id="user-1", provider_id=str(provider_id), - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"token": "enc"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -492,7 
+492,7 @@ def test_delete_trigger_provider_should_skip_unsubscribe_for_unauthorized( id="sub-2", user_id="user-1", provider_id=str(provider_id), - credential_type=CredentialType.UNAUTHORIZED.value, + credential_type=CredentialType.UNAUTHORIZED, credentials={}, to_entity=lambda: SimpleNamespace(id="sub-2"), ) @@ -527,7 +527,7 @@ def test_refresh_oauth_token_should_raise_error_for_non_oauth_credentials( mocker: MockerFixture, mock_session: MagicMock ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY) mock_session.scalar.return_value = subscription # Act + Assert @@ -545,7 +545,7 @@ def test_refresh_oauth_token_should_refresh_and_persist_new_credentials( subscription = SimpleNamespace( provider_id=str(provider_id), user_id="user-1", - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"access_token": "enc"}, credential_expires_at=0, ) @@ -613,7 +613,7 @@ def test_refresh_subscription_should_refresh_and_persist_properties( parameters={"event": "push"}, properties={"p": "enc"}, credentials={"c": "enc"}, - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.return_value = subscription _mock_get_trigger_provider(mocker, provider_controller) @@ -989,7 +989,7 @@ def test_verify_subscription_credentials_should_raise_when_api_key_validation_fa provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) provider_controller.validate_credentials.side_effect = RuntimeError("bad credentials") @@ -1012,7 
+1012,7 @@ def test_verify_subscription_credentials_should_return_verified_when_api_key_val provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1036,7 +1036,7 @@ def test_verify_subscription_credentials_should_return_verified_for_non_api_key_ provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2.value, credentials={}) + subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2, credentials={}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1100,7 +1100,7 @@ def test_rebuild_trigger_subscription_should_raise_for_unsupported_credential_ty provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED.value) + subscription = SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1126,7 +1126,7 @@ def test_rebuild_trigger_subscription_should_raise_when_unsubscribe_fails( id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -1159,7 +1159,7 @@ def test_rebuild_trigger_subscription_should_resubscribe_and_update_existing_sub id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - 
credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old-key"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) diff --git a/api/tests/unit_tests/services/test_webhook_service.py b/api/tests/unit_tests/services/test_webhook_service.py index c3335e5723..12e7b606fd 100644 --- a/api/tests/unit_tests/services/test_webhook_service.py +++ b/api/tests/unit_tests/services/test_webhook_service.py @@ -140,7 +140,7 @@ class TestWebhookServiceUnit: assert args[1] == "text/plain" assert args[2] is webhook_trigger - def test_detect_binary_mimetype_uses_magic(self, monkeypatch): + def test_detect_binary_mimetype_uses_magic(self, monkeypatch: pytest.MonkeyPatch): """python-magic output should be used when available.""" fake_magic = MagicMock() fake_magic.from_buffer.return_value = "image/png" @@ -151,7 +151,7 @@ class TestWebhookServiceUnit: assert result == "image/png" fake_magic.from_buffer.assert_called_once() - def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch): + def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch: pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic is unavailable.""" monkeypatch.setattr("services.trigger.webhook_service.magic", None) @@ -159,7 +159,7 @@ class TestWebhookServiceUnit: assert result == "application/octet-stream" - def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch): + def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch: pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic raises an exception.""" try: import magic as real_magic diff --git a/api/tests/unit_tests/services/test_workflow_service.py b/api/tests/unit_tests/services/test_workflow_service.py index feafada59a..642a459e0b 100644 --- a/api/tests/unit_tests/services/test_workflow_service.py +++ b/api/tests/unit_tests/services/test_workflow_service.py @@ -61,7 +61,7 @@ class 
TestWorkflowAssociatedDataFactory: def create_app_mock( app_id: str = "app-123", tenant_id: str = "tenant-456", - mode: str = AppMode.WORKFLOW.value, + mode: str = AppMode.WORKFLOW, workflow_id: str | None = None, **kwargs, ) -> MagicMock: @@ -93,7 +93,7 @@ class TestWorkflowAssociatedDataFactory: tenant_id: str = "tenant-456", app_id: str = "app-123", version: str = Workflow.VERSION_DRAFT, - workflow_type: str = WorkflowType.WORKFLOW.value, + workflow_type: str = WorkflowType.WORKFLOW, graph: dict[str, Any] | None = None, features: dict[str, Any] | None = None, unique_hash: str | None = None, @@ -584,7 +584,7 @@ class TestWorkflowService: id="published-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version="2026-03-19T00:00:00", graph=json.dumps(TestWorkflowAssociatedDataFactory.create_valid_workflow_graph()), features=json.dumps(legacy_features), @@ -597,7 +597,7 @@ class TestWorkflowService: id="draft-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version=Workflow.VERSION_DRAFT, graph=json.dumps({"nodes": [], "edges": []}), features=json.dumps({}), @@ -685,7 +685,7 @@ class TestWorkflowService: Different app modes have different feature configurations. This ensures the features match the expected schema for workflow apps. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) features = {"file_upload": {"enabled": False}} with patch("services.workflow_service.WorkflowAppConfigManager.config_validate") as mock_validate: @@ -696,7 +696,7 @@ class TestWorkflowService: def test_validate_features_structure_advanced_chat_mode(self, workflow_service): """Test validate_features_structure for advanced chat mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT) features = {"opening_statement": "Hello"} with patch("services.workflow_service.AdvancedChatAppConfigManager.config_validate") as mock_validate: @@ -707,7 +707,7 @@ class TestWorkflowService: def test_validate_features_structure_invalid_mode_raises_error(self, workflow_service): """Test validate_features_structure raises error for invalid mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) features = {} with pytest.raises(ValueError, match="Invalid app mode"): @@ -1326,7 +1326,7 @@ class TestWorkflowService: The conversion creates equivalent workflow nodes from the chat configuration, giving users more control and customization options. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = { "name": "Converted Workflow", @@ -1337,7 +1337,7 @@ class TestWorkflowService: with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1353,13 +1353,13 @@ class TestWorkflowService: Completion apps are simpler (single prompt-response), so the conversion creates a basic workflow with fewer nodes. """ - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {"name": "Converted Workflow"} with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1373,7 +1373,7 @@ class TestWorkflowService: Only chat and completion apps can be converted to workflows. Apps that are already workflows or have other modes cannot be converted. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {} @@ -2087,7 +2087,7 @@ class TestSetupVariablePool: This helper initialises the VariablePool used for single-step workflow execution. """ - def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW.value) -> MagicMock: + def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW) -> MagicMock: wf = MagicMock(spec=Workflow) wf.app_id = "app-1" wf.id = "wf-1" @@ -2176,7 +2176,7 @@ class TestSetupVariablePool: from models.workflow import WorkflowType # Arrange - workflow = self._make_workflow(workflow_type=WorkflowType.CHAT.value) + workflow = self._make_workflow(workflow_type=WorkflowType.CHAT) # Act with ( @@ -2650,6 +2650,7 @@ class TestWorkflowServiceHumanInputOperations: mock_node = MagicMock() mock_node.node_data = MagicMock() mock_node.node_data.outputs_field_names.return_value = ["field1"] + mock_node.node_data.inputs = [] with ( patch("services.workflow_service.db"), @@ -2657,7 +2658,10 @@ class TestWorkflowServiceHumanInputOperations: patch("models.workflow.Workflow.get_node_type_from_node_config", return_value=BuiltinNodeTypes.HUMAN_INPUT), patch.object(service, "_build_human_input_variable_pool"), patch("services.workflow_service.HumanInputNode", return_value=mock_node), - patch("services.workflow_service.validate_human_input_submission"), + patch( + "services.workflow_service.HumanInputService.validate_and_normalize_submission", + return_value={"field1": "val1"}, + ) as mock_validate, patch("services.workflow_service.Session"), patch("services.workflow_service.DraftVariableSaver") as mock_saver_cls, ): @@ -2665,6 +2669,7 @@ class TestWorkflowServiceHumanInputOperations: app_model=app_model, account=account, node_id="node-1", form_inputs={"field1": "val1"}, action="submit" ) assert 
result["__action_id"] == "submit" + mock_validate.assert_called_once() mock_saver_cls.return_value.save.assert_called_once() def test_test_human_input_delivery_success(self, service: WorkflowService) -> None: diff --git a/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py b/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py index ce0d94398d..c210db580e 100644 --- a/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py +++ b/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py @@ -180,7 +180,7 @@ class TestSetDefaultProvider: session.scalar.return_value = None with pytest.raises(ValueError, match="provider not found"): - BuiltinToolManageService.set_default_provider("t", "u", "p", "id") + BuiltinToolManageService.set_default_provider("t", "p", "id") @patch(f"{MODULE}.sessionmaker") @patch(f"{MODULE}.db") @@ -189,11 +189,29 @@ class TestSetDefaultProvider: target = MagicMock() session.scalar.return_value = target - result = BuiltinToolManageService.set_default_provider("t", "u", "p", "id") + result = BuiltinToolManageService.set_default_provider("t", "p", "id") assert result == {"result": "success"} assert target.is_default is True + @patch(f"{MODULE}.sessionmaker") + @patch(f"{MODULE}.db") + def test_clear_default_is_tenant_scoped_not_user_scoped(self, mock_db, mock_sm_cls): + # Regression: clearing prior defaults must NOT filter by user_id, otherwise + # two workspace members can each leave their own credential as default at + # the same time (the default flag is tenant-scoped, not per-user). 
+ session = _mock_sessionmaker(mock_sm_cls) + session.scalar.return_value = MagicMock() + + BuiltinToolManageService.set_default_provider("tenant-1", "google", "cred-id") + + session.execute.assert_called_once() + update_stmt = session.execute.call_args.args[0] + compiled = str(update_stmt.compile(compile_kwargs={"literal_binds": True})) + assert "user_id" not in compiled + assert "tenant_id" in compiled + assert "provider" in compiled + class TestUpdateBuiltinToolProvider: @patch(f"{MODULE}.sessionmaker") diff --git a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py index 663eec6a06..b5b9f0bd97 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py @@ -398,7 +398,7 @@ class TestWorkflowDraftVariableService: self, mock_engine, mock_session, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable when execution record doesn't exist""" mock_repo_session = Mock(spec=Session) @@ -435,7 +435,7 @@ class TestWorkflowDraftVariableService: def test_reset_node_variable_with_valid_execution_record( self, mock_session, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable with valid execution record - should restore from execution""" mock_repo_session = Mock(spec=Session) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py index dfdbd9acd6..17e9a077d6 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py @@ -414,8 +414,8 @@ def test_parse_event_message_should_parse_only_json_object( def 
test_is_terminal_event_should_recognize_finished_and_optional_paused_events() -> None: # Arrange - finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} # Act is_finished = service_module._is_terminal_event(finished_event, close_on_pause=False) @@ -426,7 +426,7 @@ def test_is_terminal_event_should_recognize_finished_and_optional_paused_events( assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, close_on_pause=True) is False + assert service_module._is_terminal_event(StreamEvent.PING, close_on_pause=True) is False def test_apply_message_context_should_update_payload_when_context_exists() -> None: @@ -569,7 +569,7 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) # Act @@ -584,9 +584,9 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even ) # Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -643,7 +643,7 @@ def test_build_workflow_event_stream_should_emit_periodic_ping_and_stop_after_id ) # Assert - assert events == 
[StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert buffer_state.stop_event.is_set() is True @@ -686,7 +686,7 @@ def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( ) # Assert - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True @@ -706,7 +706,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -729,7 +729,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( ) # Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None @@ -779,7 +779,7 @@ def test_build_snapshot_events_preserves_public_form_token(monkeypatch: pytest.M session_maker=cast(sessionmaker[Session], session_maker), ) - assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED.value + assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED assert events[-2]["data"]["form_token"] == "wtok" assert events[-2]["data"]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) pause_data = events[-1]["data"] @@ -837,6 +837,6 @@ def test_build_workflow_event_stream_loads_pause_tokens_without_flask_app_contex ) pause_event = cast(Mapping[str, Any], events[-1]) - assert 
pause_event["event"] == StreamEvent.WORKFLOW_PAUSED.value + assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED assert pause_event["data"]["reasons"][0]["form_token"] == "wtok" assert pause_event["data"]["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py index d2634d7d7b..4d711f1bf8 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py @@ -215,8 +215,8 @@ class TestWorkflowEventSnapshotHelpers: assert result == expected def test_is_terminal_event_should_recognize_finished_and_optional_paused_events(self) -> None: - finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} is_finished = service_module._is_terminal_event(finished_event, include_paused=False) paused_without_flag = service_module._is_terminal_event(paused_event, include_paused=False) @@ -225,7 +225,7 @@ class TestWorkflowEventSnapshotHelpers: assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, include_paused=True) is False + assert service_module._is_terminal_event(StreamEvent.PING, include_paused=True) is False def test_apply_message_context_should_update_payload_when_context_exists(self) -> None: payload: dict[str, Any] = {"event": "workflow_started"} @@ -352,7 +352,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": 
StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) events = list( @@ -365,9 +365,9 @@ class TestBuildWorkflowEventStream: ) ) - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -421,7 +421,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( @@ -461,7 +461,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( @@ -480,7 +480,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -501,5 +501,5 @@ class TestBuildWorkflowEventStream: ) ) 
- assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None diff --git a/api/tests/unit_tests/tasks/test_workflow_execute_task.py b/api/tests/unit_tests/tasks/test_workflow_execute_task.py index 72508bef52..2544c9d61a 100644 --- a/api/tests/unit_tests/tasks/test_workflow_execute_task.py +++ b/api/tests/unit_tests/tasks/test_workflow_execute_task.py @@ -122,7 +122,7 @@ def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(m workflow_run = SimpleNamespace( workflow_id="wf-id", app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) @@ -208,7 +208,7 @@ def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversat workflow_run = SimpleNamespace( workflow_id="wf-id", app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) diff --git a/api/uv.lock b/api/uv.lock index e6b790be48..c145ea5b14 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -50,7 +50,10 @@ members = [ "dify-vdb-vikingdb", "dify-vdb-weaviate", ] -overrides = [{ name = "pyarrow", specifier = ">=18.0.0" }] +overrides = [ + { name = "litellm", specifier = ">=1.83.7" }, + { name = "pyarrow", specifier = ">=18.0.0" }, +] [[package]] name = "abnf" @@ -898,14 +901,14 @@ wheels = [ [[package]] name = "click" -version = "8.3.1" +version = "8.1.8" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, ] [[package]] @@ -1685,7 +1688,7 @@ requires-dist = [ { name = "gmpy2", specifier = ">=2.3.0" }, { name = "google-api-python-client", specifier = ">=2.195.0" }, { name = "google-cloud-aiplatform", specifier = ">=1.149.0,<2.0.0" }, - { name = "graphon", specifier = "~=0.2.2" }, + { name = "graphon", git = "https://github.com/QuantumGhost/graphon?branch=hitl-form-dev" }, { name = "gunicorn", specifier = ">=25.3.0" }, { name = "httpx", extras = ["socks"], specifier = ">=0.28.1,<1.0.0" }, { name = "httpx-sse", specifier = "~=0.4.0" }, @@ -1720,7 +1723,7 @@ dev = [ { name = "lxml-stubs", specifier = ">=0.5.1" }, { name = "mypy", specifier = ">=1.20.2" }, { name = "pandas-stubs", specifier = ">=3.0.0" }, - { name = "pyrefly", specifier = ">=0.62.0" }, + { name = "pyrefly", specifier = ">=0.64.0" }, { name = "pytest", specifier = ">=9.0.3" }, { name = "pytest-benchmark", specifier = ">=5.2.3" }, { name = "pytest-cov", specifier = ">=7.1.0" }, @@ -2775,14 +2778,14 @@ wheels = [ [[package]] name = "gitpython" -version = "3.1.47" +version = "3.1.49" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "gitdb" 
}, ] -sdist = { url = "https://files.pythonhosted.org/packages/c1/bd/50db468e9b1310529a19fce651b3b0e753b5c07954d486cba31bbee9a5d5/gitpython-3.1.47.tar.gz", hash = "sha256:dba27f922bd2b42cb54c87a8ab3cb6beb6bf07f3d564e21ac848913a05a8a3cd", size = 216978, upload-time = "2026-04-22T02:44:44.059Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e1/63/210aaa302d6a0a78daa67c5c15bbac2cad361722841278b0209b6da20855/gitpython-3.1.49.tar.gz", hash = "sha256:42f9399c9eb33fc581014bedd76049dfbaf6375aa2a5754575966387280315e1", size = 219367, upload-time = "2026-04-29T00:31:20.478Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f2/c5/a1bc0996af85757903cf2bf444a7824e68e0035ce63fb41d6f76f9def68b/gitpython-3.1.47-py3-none-any.whl", hash = "sha256:489f590edfd6d20571b2c0e72c6a6ac6915ee8b8cd04572330e3842207a78905", size = 209547, upload-time = "2026-04-22T02:44:41.271Z" }, + { url = "https://files.pythonhosted.org/packages/fd/6f/b842bfa6f21d6f87c57f9abf7194225e55279d96d869775e19e9f7236fc5/gitpython-3.1.49-py3-none-any.whl", hash = "sha256:024b0422d7f84d15cd794844e029ffebd4c5d42a7eb9b936b458697ef550a02c", size = 212190, upload-time = "2026-04-29T00:31:18.412Z" }, ] [[package]] @@ -3056,7 +3059,7 @@ httpx = [ [[package]] name = "graphon" version = "0.2.2" -source = { registry = "https://pypi.org/simple" } +source = { git = "https://github.com/QuantumGhost/graphon?branch=hitl-form-dev#91941bfcbc1fbd0107ada2423f8ffaa628957ae7" } dependencies = [ { name = "charset-normalizer" }, { name = "httpx" }, @@ -3076,10 +3079,6 @@ dependencies = [ { name = "unstructured", extra = ["docx", "epub", "md", "ppt", "pptx"] }, { name = "webvtt-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/08/50/e745a79c5f742f88f6011a1f7c9ba2c2f9cc1beedd982f0b192f1ab8c748/graphon-0.2.2.tar.gz", hash = "sha256:141f0de536171850f1af6f738dc66f0285aadd3c097f1dad2a038636789e0aa5", size = 236360, upload-time = "2026-04-17T08:52:28.047Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/de/89/a6340afdaf5169d17a318e00fc685fb67ed99baa602c2cbbbf6af6a76096/graphon-0.2.2-py3-none-any.whl", hash = "sha256:754e544d08779138f99eac6547ab08559463680e2c76488b05e1c978210392b4", size = 340808, upload-time = "2026-04-17T08:52:26.5Z" }, -] [[package]] name = "graphql-core" @@ -3473,14 +3472,14 @@ wheels = [ [[package]] name = "importlib-metadata" -version = "8.4.0" +version = "8.5.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c0/bd/fa8ce65b0a7d4b6d143ec23b0f5fd3f7ab80121078c465bc02baeaab22dc/importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5", size = 54320, upload-time = "2024-08-20T17:11:42.348Z" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304, upload-time = "2024-09-11T14:56:08.937Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/14/362d31bf1076b21e1bcdcb0dc61944822ff263937b804a79231df2774d28/importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1", size = 26269, upload-time = "2024-08-20T17:11:41.102Z" }, + { url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514, upload-time = "2024-09-11T14:56:07.019Z" }, ] [[package]] @@ -3664,7 +3663,7 @@ wheels = [ [[package]] name = "jsonschema" -version = "4.25.1" +version = "4.23.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -3672,9 +3671,9 @@ dependencies = [ { name = "referencing" }, 
{ name = "rpds-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +sdist = { url = "https://files.pythonhosted.org/packages/38/2e/03362ee4034a4c917f697890ccd4aec0800ccf9ded7f511971c75451deec/jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4", size = 325778, upload-time = "2024-07-08T18:40:05.546Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, + { url = "https://files.pythonhosted.org/packages/69/4a/4f9dbeb84e8850557c02365a0eee0649abe5eb1d84af92a25731c6c0f922/jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566", size = 88462, upload-time = "2024-07-08T18:40:00.165Z" }, ] [[package]] @@ -3971,7 +3970,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.83.0" +version = "1.83.14" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -3987,9 +3986,9 @@ dependencies = [ { name = "tiktoken" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/22/92/6ce9737554994ca8e536e5f4f6a87cc7c4774b656c9eb9add071caf7d54b/litellm-1.83.0.tar.gz", hash = "sha256:860bebc76c4bb27b4cf90b4a77acd66dba25aced37e3db98750de8a1766bfb7a", size = 17333062, upload-time = "2026-03-31T05:08:25.331Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/7c/c095649380adc96c8630273c1768c2ad1e74aa2ee1dd8dd05d218a60569f/litellm-1.83.14.tar.gz", hash = 
"sha256:24aef9b47cdc424c833e32f3727f411741c690832cd1fe4405e0077144fe09c9", size = 14836599, upload-time = "2026-04-26T03:16:10.176Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/19/2c/a670cc050fcd6f45c6199eb99e259c73aea92edba8d5c2fc1b3686d36217/litellm-1.83.0-py3-none-any.whl", hash = "sha256:88c536d339248f3987571493015784671ba3f193a328e1ea6780dbebaa2094a8", size = 15610306, upload-time = "2026-03-31T05:08:21.987Z" }, + { url = "https://files.pythonhosted.org/packages/7f/5c/1b5691575420135e90578543b2bf219497caa33cfd0af64cb38f30288450/litellm-1.83.14-py3-none-any.whl", hash = "sha256:92b11ba2a32cf80707ddf388d18526696c7999a21b418c5e3b6eda1243d2cfdb", size = 16457054, upload-time = "2026-04-26T03:16:05.72Z" }, ] [[package]] @@ -4057,14 +4056,14 @@ wheels = [ [[package]] name = "mako" -version = "1.3.10" +version = "1.3.12" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markupsafe" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9e/38/bd5b78a920a64d708fe6bc8e0a2c075e1389d53bef8413725c63ba041535/mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28", size = 392474, upload-time = "2025-04-10T12:44:31.16Z" } +sdist = { url = "https://files.pythonhosted.org/packages/00/62/791b31e69ae182791ec67f04850f2f062716bbd205483d63a215f3e062d3/mako-1.3.12.tar.gz", hash = "sha256:9f778e93289bd410bb35daadeb4fc66d95a746f0b75777b942088b7fd7af550a", size = 400219, upload-time = "2026-04-28T19:01:08.512Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/87/fb/99f81ac72ae23375f22b7afdb7642aba97c00a713c217124420147681a2f/mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59", size = 78509, upload-time = "2025-04-10T12:50:53.297Z" }, + { url = "https://files.pythonhosted.org/packages/bc/b1/a0ec7a5a9db730a08daef1fdfb8090435b82465abbf758a596f0ea88727e/mako-1.3.12-py3-none-any.whl", hash = 
"sha256:8f61569480282dbf557145ce441e4ba888be453c30989f879f0d652e39f53ea9", size = 78521, upload-time = "2026-04-28T19:01:10.393Z" }, ] [[package]] @@ -4489,7 +4488,7 @@ wheels = [ [[package]] name = "openai" -version = "2.8.1" +version = "2.24.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -4501,9 +4500,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d5/e4/42591e356f1d53c568418dc7e30dcda7be31dd5a4d570bca22acb0525862/openai-2.8.1.tar.gz", hash = "sha256:cb1b79eef6e809f6da326a7ef6038719e35aa944c42d081807bfa1be8060f15f", size = 602490, upload-time = "2025-11-17T22:39:59.549Z" } +sdist = { url = "https://files.pythonhosted.org/packages/55/13/17e87641b89b74552ed408a92b231283786523edddc95f3545809fab673c/openai-2.24.0.tar.gz", hash = "sha256:1e5769f540dbd01cb33bc4716a23e67b9d695161a734aff9c5f925e2bf99a673", size = 658717, upload-time = "2026-02-24T20:02:07.958Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/4f/dbc0c124c40cb390508a82770fb9f6e3ed162560181a85089191a851c59a/openai-2.8.1-py3-none-any.whl", hash = "sha256:c6c3b5a04994734386e8dad3c00a393f56d3b68a27cd2e8acae91a59e4122463", size = 1022688, upload-time = "2025-11-17T22:39:57.675Z" }, + { url = "https://files.pythonhosted.org/packages/c9/30/844dc675ee6902579b8eef01ed23917cc9319a1c9c0c14ec6e39340c96d0/openai-2.24.0-py3-none-any.whl", hash = "sha256:fed30480d7d6c884303287bde864980a4b137b60553ffbcf9ab4a233b7a73d94", size = 1120122, upload-time = "2026-02-24T20:02:05.669Z" }, ] [[package]] @@ -5747,19 +5746,19 @@ wheels = [ [[package]] name = "pyrefly" -version = "0.62.0" +version = "0.64.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bb/ad/8874ed25781e7dd561c6d75fb4a7becf10a18d75b074f25b845cc334f781/pyrefly-0.62.0.tar.gz", hash = "sha256:da1fbe1075dc1e6c8e3134e9370b0a0e7a296061d782cca5bf83dbb8e4c10d7c", 
size = 5537672, upload-time = "2026-04-20T17:12:15.718Z" } +sdist = { url = "https://files.pythonhosted.org/packages/85/99/923622d7b52ef84e83f357b19bd08dff063ccc5f4472b003105e1f308d93/pyrefly-0.64.0.tar.gz", hash = "sha256:fbfcdb0031adadc340b6c64cb41c6094c95349ee952fe3d4c143866add829172", size = 5678516, upload-time = "2026-05-06T17:28:44.056Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1b/ea/09bd9da7d5df294db800312fb415be2fefbaa5594178e9e49f44fa071aea/pyrefly-0.62.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9d78ec4f126dee1fa76215b193b964490ce10e62a32d2787a72c51623658b803", size = 13020414, upload-time = "2026-04-20T17:11:43.617Z" }, - { url = "https://files.pythonhosted.org/packages/4b/f0/f84afac4f220c4c8c801b779ee2ff28ad3f7731f4283c2e1b6ee9012e8c2/pyrefly-0.62.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2a41a34902d20756264486f9e309f22633d100261bd960feea6e858a098d985d", size = 12515659, upload-time = "2026-04-20T17:11:46.59Z" }, - { url = "https://files.pythonhosted.org/packages/40/0b/620c39cefa9ae1b25ee7a2da9d8d3c278b095649cb8435c5e01ea64f7c17/pyrefly-0.62.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4666c6b65aea662e5f77b64dc91c091b7ea5cede6aa66c0f4cbae26480403583", size = 36228332, upload-time = "2026-04-20T17:11:50.523Z" }, - { url = "https://files.pythonhosted.org/packages/2d/fb/47b8b76438c12761e509a3666cd5a99d4af7f21976ba8385feb475cbfe30/pyrefly-0.62.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1aefab798f47d37c13ded791192fee9b39a6d2b12e31f38ae06a1f80c4b26e22", size = 38995741, upload-time = "2026-04-20T17:11:54.702Z" }, - { url = "https://files.pythonhosted.org/packages/55/d2/03bd17673f61147cd5609cd7d6a1455eeccc17a07a7e141ed9931b0c42c0/pyrefly-0.62.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa986b50d56740da1d7ae7c660a505143cb9d286fa98cc7e5f4a759cc6eaa5d", size = 37205321, upload-time = "2026-04-20T17:11:58.9Z" }, - { url = 
"https://files.pythonhosted.org/packages/75/14/20ba7b7f2d182f9b7c1e24a3041dac9b5730ae28cfe1614a2c98706650f2/pyrefly-0.62.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32e9b175805c82ffb967e4708f4910bace7e1a12736907380cc9afdbaabb0efb", size = 41786834, upload-time = "2026-04-20T17:12:03.221Z" }, - { url = "https://files.pythonhosted.org/packages/fa/c8/5a7ba88c4fa1b5090d877f70fa1b742b921b9e7d8d3f4b6b9b1ba1820850/pyrefly-0.62.0-py3-none-win32.whl", hash = "sha256:1cd98edc20cab5bac8016c9220ee66080e39bd22e7f0e9bb3e2c4e2be1555eed", size = 12010170, upload-time = "2026-04-20T17:12:06.791Z" }, - { url = "https://files.pythonhosted.org/packages/2e/78/d8f810de010ff2ed594c630c724fd817ef430963249e9eb396ce8f785e9d/pyrefly-0.62.0-py3-none-win_amd64.whl", hash = "sha256:6994f8ee7d6720325ee52207fbdaca98a799a1efe462bb5ba90c47160f7f3e6e", size = 12861816, upload-time = "2026-04-20T17:12:09.689Z" }, - { url = "https://files.pythonhosted.org/packages/c7/a9/ac824ef6a3f50b7c0ec5974471f8f2cb205cd1edd53a5abbcf7ba37feb5d/pyrefly-0.62.0-py3-none-win_arm64.whl", hash = "sha256:362a5d47a5ac5aaa5258091e878a1759ff8b687d8cf462af1c516144f7b0108a", size = 12352977, upload-time = "2026-04-20T17:12:12.736Z" }, + { url = "https://files.pythonhosted.org/packages/b8/1c/b001b7e84a811dbb3c85e31bd4bfc3edfa3c94438140cd1d6e8c06b7c1df/pyrefly-0.64.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:683b317d8d0e815fb2ad75b7e0fa6c15eed5be4bcbc407dc13312984da3a9c47", size = 13287462, upload-time = "2026-05-06T17:28:19.169Z" }, + { url = "https://files.pythonhosted.org/packages/89/02/1e6fcd311bd7c24aaccc0afb998d584e1fa6c370e1428b4b091103760efe/pyrefly-0.64.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:96913cc4f066a7bd008b9dba8e3951234e92bb8a3a2cb1aea0e274fd2a444c55", size = 12777104, upload-time = "2026-05-06T17:28:22.047Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/2b/3f347b8d97c9065d6ace14a22591c8d91e64610e74e0d4f214b3025ebcf7/pyrefly-0.64.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2ae557e1b6a6a5bda844806cae10b212cf84ea786ece10d55083a0321ee1705", size = 37064924, upload-time = "2026-05-06T17:28:24.743Z" }, + { url = "https://files.pythonhosted.org/packages/73/dd/0b40175e930a96139a8e9f62a8e1db7f9a5e9df8e6cef08bf280affcb05e/pyrefly-0.64.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d062ac1744346efacd7df23c6bbff662ad29ed495923cb59ede656a306355655", size = 39719832, upload-time = "2026-05-06T17:28:28.042Z" }, + { url = "https://files.pythonhosted.org/packages/9a/4b/0afb4ad02eb67ddb299ff3f7108ceb307e520578b00e900d07f2371423ca/pyrefly-0.64.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6850b305d45121911fbe25ad56497d2e887b387ea50644ba15a8ad2a8cf855f4", size = 37861666, upload-time = "2026-05-06T17:28:31.234Z" }, + { url = "https://files.pythonhosted.org/packages/e5/1b/f5390f8678433708288afab13f043ddd021a55dba3f665360d2c9396ee04/pyrefly-0.64.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a259925620a84fe87cd30a82643ec524eeef631f0c4ec5af81a21e006c2f5b1", size = 42634235, upload-time = "2026-05-06T17:28:34.405Z" }, + { url = "https://files.pythonhosted.org/packages/47/f7/4b66934e375dde3e4d75373b1a94eb7e7c0c0c788e94267641a223930180/pyrefly-0.64.0-py3-none-win32.whl", hash = "sha256:20317f6dd97e22bc508b8dbc537e59b0ab58e384113ee61920c87ed1a6a12f62", size = 12213388, upload-time = "2026-05-06T17:28:37.146Z" }, + { url = "https://files.pythonhosted.org/packages/0a/15/653523d99795041a1be6dadf7a73225317cb2aae4b21e6df57edbce807f0/pyrefly-0.64.0-py3-none-win_amd64.whl", hash = "sha256:e88fc6a83add9b7c2224be0f74df1b0db10b3af856ae30e4e0a90ba3644c712f", size = 13136719, upload-time = "2026-05-06T17:28:39.767Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/bb/9ea1c26b511b38a3e1eefc1bd3de7d3f65b2bbfdb59295f3244f61564a81/pyrefly-0.64.0-py3-none-win_arm64.whl", hash = "sha256:73744bd95e836abda0d08e9cdcf008142090ae0124c8f8ff477c944b60c0343c", size = 12526050, upload-time = "2026-05-06T17:28:42.077Z" }, ] [[package]] @@ -6911,27 +6910,28 @@ wheels = [ [[package]] name = "tokenizers" -version = "0.22.1" +version = "0.22.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } +sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, upload-time = "2026-01-05T10:45:15.988Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/33/f4b2d94ada7ab297328fc671fed209368ddb82f965ec2224eb1892674c3a/tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73", size = 3069318, upload-time = "2025-09-19T09:49:11.848Z" }, - { url = "https://files.pythonhosted.org/packages/1c/58/2aa8c874d02b974990e89ff95826a4852a8b2a273c7d1b4411cdd45a4565/tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc", size = 2926478, upload-time = "2025-09-19T09:49:09.759Z" }, - { url = "https://files.pythonhosted.org/packages/1e/3b/55e64befa1e7bfea963cf4b787b2cea1011362c4193f5477047532ce127e/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a", size = 3256994, upload-time = "2025-09-19T09:48:56.701Z" }, - { url = "https://files.pythonhosted.org/packages/71/0b/fbfecf42f67d9b7b80fde4aabb2b3110a97fac6585c9470b5bff103a80cb/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7", size = 3153141, upload-time = "2025-09-19T09:48:59.749Z" }, - { url = "https://files.pythonhosted.org/packages/17/a9/b38f4e74e0817af8f8ef925507c63c6ae8171e3c4cb2d5d4624bf58fca69/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21", size = 3508049, upload-time = "2025-09-19T09:49:05.868Z" }, - { url = "https://files.pythonhosted.org/packages/d2/48/dd2b3dac46bb9134a88e35d72e1aa4869579eacc1a27238f1577270773ff/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214", size = 3710730, upload-time = "2025-09-19T09:49:01.832Z" }, - { url = "https://files.pythonhosted.org/packages/93/0e/ccabc8d16ae4ba84a55d41345207c1e2ea88784651a5a487547d80851398/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f", size = 3412560, upload-time = "2025-09-19T09:49:03.867Z" }, - { url = "https://files.pythonhosted.org/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4", size = 3250221, upload-time = "2025-09-19T09:49:07.664Z" }, - { url = "https://files.pythonhosted.org/packages/d7/a6/2c8486eef79671601ff57b093889a345dd3d576713ef047776015dc66de7/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879", size = 9345569, upload-time = "2025-09-19T09:49:14.214Z" }, - { url = "https://files.pythonhosted.org/packages/6b/16/32ce667f14c35537f5f605fe9bea3e415ea1b0a646389d2295ec348d5657/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446", size = 9271599, upload-time = "2025-09-19T09:49:16.639Z" }, - { url = "https://files.pythonhosted.org/packages/51/7c/a5f7898a3f6baa3fc2685c705e04c98c1094c523051c805cdd9306b8f87e/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a", size = 9533862, upload-time = "2025-09-19T09:49:19.146Z" }, - { url = "https://files.pythonhosted.org/packages/36/65/7e75caea90bc73c1dd8d40438adf1a7bc26af3b8d0a6705ea190462506e1/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390", size = 9681250, upload-time = "2025-09-19T09:49:21.501Z" }, - { url = "https://files.pythonhosted.org/packages/30/2c/959dddef581b46e6209da82df3b78471e96260e2bc463f89d23b1bf0e52a/tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82", size = 2472003, upload-time = "2025-09-19T09:49:27.089Z" }, - { url = "https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" }, + { 
url = "https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, upload-time = "2026-01-05T10:41:00.276Z" }, + { url = "https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" }, + { url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" }, + { url = "https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" }, + { url = "https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" }, + { url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" }, + { url 
= "https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = "2026-01-05T10:40:58.331Z" }, + { url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" }, + { url = "https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" }, + { url = "https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" }, + { url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" }, + { url = 
"https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" }, + { url = "https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" }, ] [[package]] diff --git a/dev/pytest/pytest_config_tests.py b/dev/pytest/pytest_config_tests.py index d56cceff5e..b136f09c61 100644 --- a/dev/pytest/pytest_config_tests.py +++ b/dev/pytest/pytest_config_tests.py @@ -93,10 +93,16 @@ BASE_API_AND_DOCKER_COMPOSE_CONFIG_SET_DIFF: frozenset[str] = frozenset( API_CONFIG_SET = set(dotenv_values(Path("api") / Path(".env.example")).keys()) DOCKER_CONFIG_SET = set(dotenv_values(Path("docker") / Path(".env.example")).keys()) -DOCKER_COMPOSE_CONFIG_SET = set() +DOCKER_COMPOSE_CONFIG_SET = set(DOCKER_CONFIG_SET) -with open(Path("docker") / Path("docker-compose.yaml")) as f: - DOCKER_COMPOSE_CONFIG_SET = set(yaml.safe_load(f.read())["x-shared-env"].keys()) +# Read environment variables from the split env files used by docker-compose +# Walk through all .env.example files in subdirectories (per-module structure) +envs_dir = Path("docker") / Path("envs") +if envs_dir.exists(): + for env_file_path in envs_dir.rglob("*.env.example"): + env_keys = set(dotenv_values(env_file_path).keys()) + DOCKER_CONFIG_SET.update(env_keys) + DOCKER_COMPOSE_CONFIG_SET.update(env_keys) def test_yaml_config(): diff --git a/docker/.env.example b/docker/.env.example index 29741474fa..82bd837ffb 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -1,1249 +1,157 @@ -# ------------------------------ -# Environment Variables for API service & worker -# 
------------------------------ +# ------------------------------------------------------------------ +# Essential defaults for Docker Compose deployments. +# +# For a default deployment, copy this file to .env and run: +# docker compose up -d +# +# Optional and provider-specific variables live under docker/envs/. +# Copy an optional *.env.example file beside itself without the +# .example suffix when you need those advanced settings. +# Values in docker/.env take precedence over docker/envs/*.env files. +# ------------------------------------------------------------------ -# ------------------------------ -# Common Variables -# ------------------------------ - -# The backend URL of the console API, -# used to concatenate the authorization callback. -# If empty, it is the same domain. -# Example: https://api.console.dify.ai +# Core service URLs CONSOLE_API_URL= - -# The front-end URL of the console web, -# used to concatenate some front-end addresses and for CORS configuration use. -# If empty, it is the same domain. -# Example: https://console.dify.ai CONSOLE_WEB_URL= - -# Service API Url, -# used to display Service API Base Url to the front-end. -# If empty, it is the same domain. -# Example: https://api.dify.ai SERVICE_API_URL= - -# Trigger external URL -# used to display trigger endpoint API Base URL to the front-end. -# Example: https://api.dify.ai TRIGGER_URL=http://localhost - -# WebApp API backend Url, -# used to declare the back-end URL for the front-end API. -# If empty, it is the same domain. -# Example: https://api.app.dify.ai APP_API_URL= - -# WebApp Url, -# used to display WebAPP API Base Url to the front-end. -# If empty, it is the same domain. -# Example: https://app.dify.ai APP_WEB_URL= - -# File preview or download Url prefix. -# used to display File preview or download Url to the front-end or as Multi-model inputs; -# Url is signed and has expiration time. -# Setting FILES_URL is required for file processing plugins. 
-# - For https://example.com, use FILES_URL=https://example.com -# - For http://example.com, use FILES_URL=http://example.com -# Recommendation: use a dedicated domain (e.g., https://upload.example.com). -# Alternatively, use http://:5001 or http://api:5001, -# ensuring port 5001 is externally accessible (see docker-compose.yaml). FILES_URL= - -# INTERNAL_FILES_URL is used for plugin daemon communication within Docker network. -# Set this to the internal Docker service URL for proper plugin file access. -# Example: INTERNAL_FILES_URL=http://api:5001 INTERNAL_FILES_URL= +ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} +NEXT_PUBLIC_SOCKET_URL=ws://localhost -# Ensure UTF-8 encoding +# Runtime and security LANG=C.UTF-8 LC_ALL=C.UTF-8 PYTHONIOENCODING=utf-8 - -# Set UV cache directory to avoid permission issues with non-existent home directory UV_CACHE_DIR=/tmp/.uv-cache - -# ------------------------------ -# Server Configuration -# ------------------------------ - -# The log level for the application. -# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` -LOG_LEVEL=INFO -# Log output format: text or json -LOG_OUTPUT_FORMAT=text -# Log file path -LOG_FILE=/app/logs/server.log -# Log file max size, the unit is MB -LOG_FILE_MAX_SIZE=20 -# Log file max backup count -LOG_FILE_BACKUP_COUNT=5 -# Log dateformat -LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S -# Log Timezone -LOG_TZ=UTC - -# Debug mode, default is false. -# It is recommended to turn on this configuration for local development -# to prevent some problems caused by monkey patch. -DEBUG=false - -# Flask debug mode, it can output trace information at the interface when turned on, -# which is convenient for debugging. -FLASK_DEBUG=false - -# Enable request logging, which will log the request and response information. -# And the log level is DEBUG -ENABLE_REQUEST_LOGGING=False - -# A secret key that is used for securely signing the session cookie -# and encrypting sensitive information on the database. 
-# You can generate a strong key using `openssl rand -base64 42`. SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U - -# Password for admin user initialization. -# If left unset, admin user will not be prompted for a password -# when creating the initial admin account. -# The length of the password cannot exceed 30 characters. INIT_PASSWORD= - -# Deployment environment. -# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`. -# Testing environment. There will be a distinct color label on the front-end page, -# indicating that this environment is a testing environment. DEPLOY_ENV=PRODUCTION - -# Whether to enable the version check policy. -# If set to empty, https://updates.dify.ai will be called for version check. CHECK_UPDATE_URL=https://updates.dify.ai - -# Used to change the OpenAI base address, default is https://api.openai.com/v1. -# When OpenAI cannot be accessed in China, replace it with a domestic mirror address, -# or when a local model provides OpenAI compatible API, it can be replaced. OPENAI_API_BASE=https://api.openai.com/v1 - -# When enabled, migrations will be executed prior to application startup -# and the application will start after the migrations have completed. MIGRATION_ENABLED=true - -# File Access Time specifies a time interval in seconds for the file to be accessed. -# The default value is 300 seconds. FILES_ACCESS_TIMEOUT=300 - -# Collaboration mode toggle -# To open collaboration features, you also need to set SERVER_WORKER_CLASS=geventwebsocket.gunicorn.workers.GeventWebSocketWorker ENABLE_COLLABORATION_MODE=false -# Access token expiration time in minutes -ACCESS_TOKEN_EXPIRE_MINUTES=60 - -# Refresh token expiration time in days -REFRESH_TOKEN_EXPIRE_DAYS=30 - -# The default number of active requests for the application, where 0 means unlimited, should be a non-negative integer. 
-APP_DEFAULT_ACTIVE_REQUESTS=0 -# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer. -APP_MAX_ACTIVE_REQUESTS=0 -APP_MAX_EXECUTION_TIME=1200 - -# ------------------------------ -# Container Startup Related Configuration -# Only effective when starting with docker image or docker-compose. -# ------------------------------ - -# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed. +# Logging and server workers +LOG_LEVEL=INFO +LOG_OUTPUT_FORMAT=text +LOG_FILE=/app/logs/server.log +LOG_FILE_MAX_SIZE=20 +LOG_FILE_BACKUP_COUNT=5 +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S +LOG_TZ=UTC +DEBUG=false +FLASK_DEBUG=false +ENABLE_REQUEST_LOGGING=False DIFY_BIND_ADDRESS=0.0.0.0 - -# API service binding port number, default 5001. DIFY_PORT=5001 - -# The number of API server workers, i.e., the number of workers. -# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent -# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers SERVER_WORKER_AMOUNT=1 - -# Defaults to gevent. If using windows, it can be switched to sync or solo. -# -# Warning: Changing this parameter requires disabling patching for -# psycopg2 and gRPC (see `gunicorn.conf.py` and `celery_entrypoint.py`). -# Modifying it may also decrease throughput. -# -# It is strongly discouraged to change this parameter. -# If enable collaboration mode, it must be set to geventwebsocket.gunicorn.workers.GeventWebSocketWorker SERVER_WORKER_CLASS=gevent - -# Default number of worker connections, the default is 10. SERVER_WORKER_CONNECTIONS=10 - -# Similar to SERVER_WORKER_CLASS. -# If using windows, it can be switched to sync or solo. -# -# Warning: Changing this parameter requires disabling patching for -# psycopg2 and gRPC (see `gunicorn_conf.py` and `celery_entrypoint.py`). -# Modifying it may also decrease throughput. -# -# It is strongly discouraged to change this parameter. 
-CELERY_WORKER_CLASS= - -# Request handling timeout. The default is 200, -# it is recommended to set it to 360 to support a longer sse connection time. GUNICORN_TIMEOUT=360 - -# The number of Celery workers. The default is 4 for development environments -# to allow parallel processing of workflows, document indexing, and other async tasks. -# Adjust based on your system resources and workload requirements. +CELERY_WORKER_CLASS= CELERY_WORKER_AMOUNT=4 - -# Flag indicating whether to enable autoscaling of Celery workers. -# -# Autoscaling is useful when tasks are CPU intensive and can be dynamically -# allocated and deallocated based on the workload. -# -# When autoscaling is enabled, the maximum and minimum number of workers can -# be specified. The autoscaling algorithm will dynamically adjust the number -# of workers within the specified range. -# -# Default is false (i.e., autoscaling is disabled). -# -# Example: -# CELERY_AUTO_SCALE=true CELERY_AUTO_SCALE=false - -# The maximum number of Celery workers that can be autoscaled. -# This is optional and only used when autoscaling is enabled. -# Default is not set. CELERY_MAX_WORKERS= - -# The minimum number of Celery workers that can be autoscaled. -# This is optional and only used when autoscaling is enabled. -# Default is not set. 
CELERY_MIN_WORKERS= +COMPOSE_WORKER_HEALTHCHECK_DISABLED=true +COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s +COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s -# API Tool configuration -API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 -API_TOOL_DEFAULT_READ_TIMEOUT=60 - -# ------------------------------- -# Datasource Configuration -# -------------------------------- -ENABLE_WEBSITE_JINAREADER=true -ENABLE_WEBSITE_FIRECRAWL=true -ENABLE_WEBSITE_WATERCRAWL=true - -# Enable inline LaTeX rendering with single dollar signs ($...$) in the web frontend -# Default is false for security reasons to prevent conflicts with regular text -NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false - -# ------------------------------ -# Database Configuration -# The database uses PostgreSQL or MySQL. OceanBase and seekdb are also supported. Please use the public schema. -# It is consistent with the configuration in the database service below. -# You can adjust the database configuration according to your needs. -# ------------------------------ - -# Database type, supported values are `postgresql`, `mysql`, `oceanbase`, `seekdb` +# Database DB_TYPE=postgresql -# For MySQL, only `root` user is supported for now DB_USERNAME=postgres DB_PASSWORD=difyai123456 DB_HOST=db_postgres DB_PORT=5432 DB_DATABASE=dify - -# The size of the database connection pool. -# The default is 30 connections, which can be appropriately increased. SQLALCHEMY_POOL_SIZE=30 -# The default is 10 connections, which allows temporary overflow beyond the pool size. SQLALCHEMY_MAX_OVERFLOW=10 -# Database connection pool recycling time, the default is 3600 seconds. SQLALCHEMY_POOL_RECYCLE=3600 -# Whether to print SQL, default is false. 
SQLALCHEMY_ECHO=false -# If True, will test connections for liveness upon each checkout SQLALCHEMY_POOL_PRE_PING=false -# Whether to enable the Last in first out option or use default FIFO queue if is false SQLALCHEMY_POOL_USE_LIFO=false -# Number of seconds to wait for a connection from the pool before raising a timeout error. -# Default is 30 SQLALCHEMY_POOL_TIMEOUT=30 - -# Maximum number of connections to the database -# Default is 100 -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS +SQLALCHEMY_POOL_RESET_ON_RETURN=rollback +PGDATA=/var/lib/postgresql/data/pgdata POSTGRES_MAX_CONNECTIONS=200 - -# Sets the amount of shared memory used for postgres's shared buffers. -# Default is 128MB -# Recommended value: 25% of available memory -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS POSTGRES_SHARED_BUFFERS=128MB - -# Sets the amount of memory used by each database worker for working space. -# Default is 4MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM POSTGRES_WORK_MEM=4MB - -# Sets the amount of memory reserved for maintenance activities. -# Default is 64MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM POSTGRES_MAINTENANCE_WORK_MEM=64MB - -# Sets the planner's assumption about the effective cache size. -# Default is 4096MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB - -# Sets the maximum allowed duration of any statement before termination. -# Default is 0 (no timeout). -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT -# A value of 0 prevents the server from timing out statements. 
POSTGRES_STATEMENT_TIMEOUT=0 - -# Sets the maximum allowed duration of any idle in-transaction session before termination. -# Default is 0 (no timeout). -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT -# A value of 0 prevents the server from terminating idle sessions. POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0 -# MySQL Performance Configuration -# Maximum number of connections to MySQL -# -# Default is 1000 -MYSQL_MAX_CONNECTIONS=1000 - -# InnoDB buffer pool size -# Default is 512M -# Recommended value: 70-80% of available memory for dedicated MySQL server -# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size -MYSQL_INNODB_BUFFER_POOL_SIZE=512M - -# InnoDB log file size -# Default is 128M -# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size -MYSQL_INNODB_LOG_FILE_SIZE=128M - -# InnoDB flush log at transaction commit -# Default is 2 (flush to OS cache, sync every second) -# Options: 0 (no flush), 1 (flush and sync), 2 (flush to OS cache) -# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit -MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2 - -# ------------------------------ -# Redis Configuration -# This Redis configuration is used for caching and for pub/sub during conversation. 
-# ------------------------------ - +# Redis and Celery REDIS_HOST=redis REDIS_PORT=6379 REDIS_USERNAME= REDIS_PASSWORD=difyai123456 REDIS_USE_SSL=false -# SSL configuration for Redis (when REDIS_USE_SSL=true) REDIS_SSL_CERT_REQS=CERT_NONE -# Options: CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED REDIS_SSL_CA_CERTS= -# Path to CA certificate file for SSL verification REDIS_SSL_CERTFILE= -# Path to client certificate file for SSL authentication REDIS_SSL_KEYFILE= -# Path to client private key file for SSL authentication REDIS_DB=0 -# Optional global prefix for Redis keys, topics, streams, and Celery Redis transport artifacts. -# Leave empty to preserve current unprefixed behavior. REDIS_KEY_PREFIX= -# Optional: limit total Redis connections used by API/Worker (unset for default) -# Align with API's REDIS_MAX_CONNECTIONS in configs REDIS_MAX_CONNECTIONS= - -# Whether to use Redis Sentinel mode. -# If set to true, the application will automatically discover and connect to the master node through Sentinel. -REDIS_USE_SENTINEL=false - -# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port. -# Format: `:,:,:` -REDIS_SENTINELS= -REDIS_SENTINEL_SERVICE_NAME= -REDIS_SENTINEL_USERNAME= -REDIS_SENTINEL_PASSWORD= -REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 - -# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port. 
-# Format: `:,:,:` -REDIS_USE_CLUSTERS=false -REDIS_CLUSTERS= -REDIS_CLUSTERS_PASSWORD= - -# Redis connection and retry configuration -# max redis retry REDIS_RETRY_RETRIES=3 -# Base delay (in seconds) for exponential backoff on retries REDIS_RETRY_BACKOFF_BASE=1.0 -# Cap (in seconds) for exponential backoff on retries REDIS_RETRY_BACKOFF_CAP=10.0 -# Timeout (in seconds) for Redis socket operations REDIS_SOCKET_TIMEOUT=5.0 -# Timeout (in seconds) for establishing a Redis connection REDIS_SOCKET_CONNECT_TIMEOUT=5.0 -# Interval (in seconds) for Redis health checks REDIS_HEALTH_CHECK_INTERVAL=30 - -# ------------------------------ -# Celery Configuration -# ------------------------------ - -# Use standalone redis as the broker, and redis db 1 for celery broker. (redis_username is usually set by default as empty) -# Format as follows: `redis://:@:/`. -# Example: redis://:difyai123456@redis:6379/1 -# If use Redis Sentinel, format as follows: `sentinel://:@:/` -# For high availability, you can configure multiple Sentinel nodes (if provided) separated by semicolons like below example: -# Example: sentinel://:difyai123456@localhost:26379/1;sentinel://:difyai12345@localhost:26379/1;sentinel://:difyai12345@localhost:26379/1 CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1 CELERY_BACKEND=redis BROKER_USE_SSL=false - -# If you are using Redis Sentinel for high availability, configure the following settings. -CELERY_USE_SENTINEL=false -CELERY_SENTINEL_MASTER_NAME= -CELERY_SENTINEL_PASSWORD= -CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 -# e.g. {"tasks.add": {"rate_limit": "10/s"}} CELERY_TASK_ANNOTATIONS=null +EVENT_BUS_REDIS_URL= +EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub +EVENT_BUS_REDIS_USE_CLUSTERS=false -# ------------------------------ -# CORS Configuration -# Used to set the front-end cross-domain access policy. -# ------------------------------ - -# Specifies the allowed origins for cross-origin requests to the Web API, -# e.g. https://dify.app or * for all origins. 
+# Web and app limits WEB_API_CORS_ALLOW_ORIGINS=* - -# Specifies the allowed origins for cross-origin requests to the console API, -# e.g. https://cloud.dify.ai or * for all origins. CONSOLE_CORS_ALLOW_ORIGINS=* -# When the frontend and backend run on different subdomains, set COOKIE_DOMAIN to the site's top-level domain (e.g., `example.com`). Leading dots are optional. COOKIE_DOMAIN= -# When the frontend and backend run on different subdomains, set NEXT_PUBLIC_COOKIE_DOMAIN=1. NEXT_PUBLIC_COOKIE_DOMAIN= -# WebSocket server URL. -NEXT_PUBLIC_SOCKET_URL=ws://localhost NEXT_PUBLIC_BATCH_CONCURRENCY=5 +API_SENTRY_DSN= +API_SENTRY_TRACES_SAMPLE_RATE=1.0 +API_SENTRY_PROFILES_SAMPLE_RATE=1.0 +WEB_SENTRY_DSN= +AMPLITUDE_API_KEY= +TEXT_GENERATION_TIMEOUT_MS=60000 +CSP_WHITELIST= +ALLOW_EMBED=false +ALLOW_INLINE_STYLES=false +ALLOW_UNSAFE_DATA_SCHEME=false +TOP_K_MAX_VALUE=10 +INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 +LOOP_NODE_MAX_COUNT=100 +MAX_TOOLS_NUM=10 +MAX_PARALLEL_LIMIT=10 +MAX_ITERATIONS_NUM=99 +MAX_TREE_DEPTH=50 +ENABLE_WEBSITE_JINAREADER=true +ENABLE_WEBSITE_FIRECRAWL=true +ENABLE_WEBSITE_WATERCRAWL=true +NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false +EXPERIMENTAL_ENABLE_VINEXT=false -# ------------------------------ -# File Storage Configuration -# ------------------------------ - -# The type of storage to use for storing user files. +# Storage and default vector store STORAGE_TYPE=opendal - -# Apache OpenDAL Configuration -# The configuration for OpenDAL consists of the following format: OPENDAL__. -# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services. -# Dify will scan configurations starting with OPENDAL_ and automatically apply them. -# The scheme name for the OpenDAL storage. OPENDAL_SCHEME=fs -# Configurations for OpenDAL Local File System. 
OPENDAL_FS_ROOT=storage - -# ClickZetta Volume Configuration (for storage backend) -# To use ClickZetta Volume as storage backend, set STORAGE_TYPE=clickzetta-volume -# Note: ClickZetta Volume will reuse the existing CLICKZETTA_* connection parameters - -# Volume type selection (three types available): -# - user: Personal/small team use, simple config, user-level permissions -# - table: Enterprise multi-tenant, smart routing, table-level + user-level permissions -# - external: Data lake integration, external storage connection, volume-level + storage-level permissions -CLICKZETTA_VOLUME_TYPE=user - -# External Volume name (required only when TYPE=external) -CLICKZETTA_VOLUME_NAME= - -# Table Volume table prefix (used only when TYPE=table) -CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_ - -# Dify file directory prefix (isolates from other apps, recommended to keep default) -CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km - -# S3 Configuration -# -S3_ENDPOINT= -S3_REGION=us-east-1 -S3_BUCKET_NAME=difyai -S3_ACCESS_KEY= -S3_SECRET_KEY= -S3_ADDRESS_STYLE=auto -# Whether to use AWS managed IAM roles for authenticating with the S3 service. -# If set to false, the access key and secret key must be provided. 
-S3_USE_AWS_MANAGED_IAM=false - -# Workflow run and Conversation archive storage (S3-compatible) -ARCHIVE_STORAGE_ENABLED=false -ARCHIVE_STORAGE_ENDPOINT= -ARCHIVE_STORAGE_ARCHIVE_BUCKET= -ARCHIVE_STORAGE_EXPORT_BUCKET= -ARCHIVE_STORAGE_ACCESS_KEY= -ARCHIVE_STORAGE_SECRET_KEY= -ARCHIVE_STORAGE_REGION=auto - -# Azure Blob Configuration -# -AZURE_BLOB_ACCOUNT_NAME=difyai -AZURE_BLOB_ACCOUNT_KEY=difyai -AZURE_BLOB_CONTAINER_NAME=difyai-container -AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net - -# Google Storage Configuration -# -GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name -GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= - -# The Alibaba Cloud OSS configurations, -# -ALIYUN_OSS_BUCKET_NAME=your-bucket-name -ALIYUN_OSS_ACCESS_KEY=your-access-key -ALIYUN_OSS_SECRET_KEY=your-secret-key -ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com -ALIYUN_OSS_REGION=ap-southeast-1 -ALIYUN_OSS_AUTH_VERSION=v4 -# Don't start with '/'. OSS doesn't support leading slash in object names. -ALIYUN_OSS_PATH=your-path -# Optional CloudBox ID for Aliyun OSS, DO NOT enable it if you are not using CloudBox. 
-#ALIYUN_CLOUDBOX_ID=your-cloudbox-id - -# Tencent COS Configuration -# -TENCENT_COS_BUCKET_NAME=your-bucket-name -TENCENT_COS_SECRET_KEY=your-secret-key -TENCENT_COS_SECRET_ID=your-secret-id -TENCENT_COS_REGION=your-region -TENCENT_COS_SCHEME=your-scheme -TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain - -# Oracle Storage Configuration -# -OCI_ENDPOINT=https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com -OCI_BUCKET_NAME=your-bucket-name -OCI_ACCESS_KEY=your-access-key -OCI_SECRET_KEY=your-secret-key -OCI_REGION=us-ashburn-1 - -# Huawei OBS Configuration -# -HUAWEI_OBS_BUCKET_NAME=your-bucket-name -HUAWEI_OBS_SECRET_KEY=your-secret-key -HUAWEI_OBS_ACCESS_KEY=your-access-key -HUAWEI_OBS_SERVER=your-server-url -HUAWEI_OBS_PATH_STYLE=false - -# Volcengine TOS Configuration -# -VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name -VOLCENGINE_TOS_SECRET_KEY=your-secret-key -VOLCENGINE_TOS_ACCESS_KEY=your-access-key -VOLCENGINE_TOS_ENDPOINT=your-server-url -VOLCENGINE_TOS_REGION=your-region - -# Baidu OBS Storage Configuration -# -BAIDU_OBS_BUCKET_NAME=your-bucket-name -BAIDU_OBS_SECRET_KEY=your-secret-key -BAIDU_OBS_ACCESS_KEY=your-access-key -BAIDU_OBS_ENDPOINT=your-server-url - -# Supabase Storage Configuration -# -SUPABASE_BUCKET_NAME=your-bucket-name -SUPABASE_API_KEY=your-access-key -SUPABASE_URL=your-server-url - -# ------------------------------ -# Vector Database Configuration -# ------------------------------ - -# The type of vector store to use. -# Supported values are `weaviate`, `oceanbase`, `seekdb`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`, `vastbase`, `tidb`, `tidb_on_qdrant`, `baidu`, `lindorm`, `huawei_cloud`, `upstash`, `matrixone`, `clickzetta`, `alibabacloud_mysql`, `iris`, `hologres`. 
VECTOR_STORE=weaviate -# Prefix used to create collection name in vector database VECTOR_INDEX_NAME_PREFIX=Vector_index - -# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`. WEAVIATE_ENDPOINT=http://weaviate:8080 WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih WEAVIATE_GRPC_ENDPOINT=grpc://weaviate:50051 WEAVIATE_TOKENIZATION=word - -# For OceanBase metadata database configuration, available when `DB_TYPE` is `oceanbase`. -# For OceanBase vector database configuration, available when `VECTOR_STORE` is `oceanbase` -# If you want to use OceanBase as both vector database and metadata database, you need to set both `DB_TYPE` and `VECTOR_STORE` to `oceanbase`, and set Database Configuration is the same as the vector database. -# seekdb is the lite version of OceanBase and shares the connection configuration with OceanBase. -OCEANBASE_VECTOR_HOST=oceanbase -OCEANBASE_VECTOR_PORT=2881 -OCEANBASE_VECTOR_USER=root@test -OCEANBASE_VECTOR_PASSWORD=difyai123456 -OCEANBASE_VECTOR_DATABASE=test -OCEANBASE_CLUSTER_NAME=difyai -OCEANBASE_MEMORY_LIMIT=6G -OCEANBASE_ENABLE_HYBRID_SEARCH=false -# For OceanBase vector database, built-in fulltext parsers are `ngram`, `beng`, `space`, `ngram2`, `ik` -# For OceanBase vector database, external fulltext parsers (require plugin installation) are `japanese_ftparser`, `thai_ftparser` -OCEANBASE_FULLTEXT_PARSER=ik -SEEKDB_MEMORY_LIMIT=2G - -# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`. -QDRANT_URL=http://qdrant:6333 -QDRANT_API_KEY=difyai123456 -QDRANT_CLIENT_TIMEOUT=20 -QDRANT_GRPC_ENABLED=false -QDRANT_GRPC_PORT=6334 -QDRANT_REPLICATION_FACTOR=1 - -# Milvus configuration. Only available when VECTOR_STORE is `milvus`. -# The milvus uri. 
-MILVUS_URI=http://host.docker.internal:19530 -MILVUS_DATABASE= -MILVUS_TOKEN= -MILVUS_USER= -MILVUS_PASSWORD= -MILVUS_ENABLE_HYBRID_SEARCH=False -MILVUS_ANALYZER_PARAMS= - -# MyScale configuration, only available when VECTOR_STORE is `myscale` -# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to: -# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters -MYSCALE_HOST=myscale -MYSCALE_PORT=8123 -MYSCALE_USER=default -MYSCALE_PASSWORD= -MYSCALE_DATABASE=dify -MYSCALE_FTS_PARAMS= - -# Couchbase configurations, only available when VECTOR_STORE is `couchbase` -# The connection string must include hostname defined in the docker-compose file (couchbase-server in this case) -COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server -COUCHBASE_USER=Administrator -COUCHBASE_PASSWORD=password -COUCHBASE_BUCKET_NAME=Embeddings -COUCHBASE_SCOPE_NAME=_default - -# Hologres configurations, only available when VECTOR_STORE is `hologres` -# access_key_id is used as the PG username, access_key_secret is used as the PG password -HOLOGRES_HOST= -HOLOGRES_PORT=80 -HOLOGRES_DATABASE= -HOLOGRES_ACCESS_KEY_ID= -HOLOGRES_ACCESS_KEY_SECRET= -HOLOGRES_SCHEMA=public -HOLOGRES_TOKENIZER=jieba -HOLOGRES_DISTANCE_METHOD=Cosine -HOLOGRES_BASE_QUANTIZATION_TYPE=rabitq -HOLOGRES_MAX_DEGREE=64 -HOLOGRES_EF_CONSTRUCTION=400 - -# pgvector configurations, only available when VECTOR_STORE is `pgvector` -PGVECTOR_HOST=pgvector -PGVECTOR_PORT=5432 -PGVECTOR_USER=postgres -PGVECTOR_PASSWORD=difyai123456 -PGVECTOR_DATABASE=dify -PGVECTOR_MIN_CONNECTION=1 -PGVECTOR_MAX_CONNECTION=5 -PGVECTOR_PG_BIGM=false -PGVECTOR_PG_BIGM_VERSION=1.2-20240606 - -# vastbase configurations, only available when VECTOR_STORE is `vastbase` -VASTBASE_HOST=vastbase -VASTBASE_PORT=5432 -VASTBASE_USER=dify -VASTBASE_PASSWORD=Difyai123456 -VASTBASE_DATABASE=dify -VASTBASE_MIN_CONNECTION=1 -VASTBASE_MAX_CONNECTION=5 - -# pgvecto-rs configurations, only available when VECTOR_STORE is 
`pgvecto-rs` -PGVECTO_RS_HOST=pgvecto-rs -PGVECTO_RS_PORT=5432 -PGVECTO_RS_USER=postgres -PGVECTO_RS_PASSWORD=difyai123456 -PGVECTO_RS_DATABASE=dify - -# analyticdb configurations, only available when VECTOR_STORE is `analyticdb` -ANALYTICDB_KEY_ID=your-ak -ANALYTICDB_KEY_SECRET=your-sk -ANALYTICDB_REGION_ID=cn-hangzhou -ANALYTICDB_INSTANCE_ID=gp-ab123456 -ANALYTICDB_ACCOUNT=testaccount -ANALYTICDB_PASSWORD=testpassword -ANALYTICDB_NAMESPACE=dify -ANALYTICDB_NAMESPACE_PASSWORD=difypassword -ANALYTICDB_HOST=gp-test.aliyuncs.com -ANALYTICDB_PORT=5432 -ANALYTICDB_MIN_CONNECTION=1 -ANALYTICDB_MAX_CONNECTION=5 - -# TiDB vector configurations, only available when VECTOR_STORE is `tidb_vector` -TIDB_VECTOR_HOST=tidb -TIDB_VECTOR_PORT=4000 -TIDB_VECTOR_USER= -TIDB_VECTOR_PASSWORD= -TIDB_VECTOR_DATABASE=dify - -# Matrixone vector configurations. -MATRIXONE_HOST=matrixone -MATRIXONE_PORT=6001 -MATRIXONE_USER=dump -MATRIXONE_PASSWORD=111 -MATRIXONE_DATABASE=dify - -# Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant` -TIDB_ON_QDRANT_URL=http://127.0.0.1 -TIDB_ON_QDRANT_API_KEY=dify -TIDB_ON_QDRANT_CLIENT_TIMEOUT=20 -TIDB_ON_QDRANT_GRPC_ENABLED=false -TIDB_ON_QDRANT_GRPC_PORT=6334 -TIDB_PUBLIC_KEY=dify -TIDB_PRIVATE_KEY=dify -TIDB_API_URL=http://127.0.0.1 -TIDB_IAM_API_URL=http://127.0.0.1 -TIDB_REGION=regions/aws-us-east-1 -TIDB_PROJECT_ID=dify -TIDB_SPEND_LIMIT=100 - -# Chroma configuration, only available when VECTOR_STORE is `chroma` -CHROMA_HOST=127.0.0.1 -CHROMA_PORT=8000 -CHROMA_TENANT=default_tenant -CHROMA_DATABASE=default_database -CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider -CHROMA_AUTH_CREDENTIALS= - -# Oracle configuration, only available when VECTOR_STORE is `oracle` -ORACLE_USER=dify -ORACLE_PASSWORD=dify -ORACLE_DSN=oracle:1521/FREEPDB1 -ORACLE_CONFIG_DIR=/app/api/storage/wallet -ORACLE_WALLET_LOCATION=/app/api/storage/wallet -ORACLE_WALLET_PASSWORD=dify -ORACLE_IS_AUTONOMOUS=false - -# AlibabaCloud 
MySQL configuration, only available when VECTOR_STORE is `alibabcloud_mysql` -ALIBABACLOUD_MYSQL_HOST=127.0.0.1 -ALIBABACLOUD_MYSQL_PORT=3306 -ALIBABACLOUD_MYSQL_USER=root -ALIBABACLOUD_MYSQL_PASSWORD=difyai123456 -ALIBABACLOUD_MYSQL_DATABASE=dify -ALIBABACLOUD_MYSQL_MAX_CONNECTION=5 -ALIBABACLOUD_MYSQL_HNSW_M=6 - -# relyt configurations, only available when VECTOR_STORE is `relyt` -RELYT_HOST=db -RELYT_PORT=5432 -RELYT_USER=postgres -RELYT_PASSWORD=difyai123456 -RELYT_DATABASE=postgres - -# open search configuration, only available when VECTOR_STORE is `opensearch` -OPENSEARCH_HOST=opensearch -OPENSEARCH_PORT=9200 -OPENSEARCH_SECURE=true -OPENSEARCH_VERIFY_CERTS=true -OPENSEARCH_AUTH_METHOD=basic -OPENSEARCH_USER=admin -OPENSEARCH_PASSWORD=admin -# If using AWS managed IAM, e.g. Managed Cluster or OpenSearch Serverless -OPENSEARCH_AWS_REGION=ap-southeast-1 -OPENSEARCH_AWS_SERVICE=aoss - -# tencent vector configurations, only available when VECTOR_STORE is `tencent` -TENCENT_VECTOR_DB_URL=http://127.0.0.1 -TENCENT_VECTOR_DB_API_KEY=dify -TENCENT_VECTOR_DB_TIMEOUT=30 -TENCENT_VECTOR_DB_USERNAME=dify -TENCENT_VECTOR_DB_DATABASE=dify -TENCENT_VECTOR_DB_SHARD=1 -TENCENT_VECTOR_DB_REPLICAS=2 -TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false - -# ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch` -ELASTICSEARCH_HOST=0.0.0.0 -ELASTICSEARCH_PORT=9200 -ELASTICSEARCH_USERNAME=elastic -ELASTICSEARCH_PASSWORD=elastic -KIBANA_PORT=5601 - -# Using ElasticSearch Cloud Serverless, or not. 
-ELASTICSEARCH_USE_CLOUD=false -ELASTICSEARCH_CLOUD_URL=YOUR-ELASTICSEARCH_CLOUD_URL -ELASTICSEARCH_API_KEY=YOUR-ELASTICSEARCH_API_KEY - -ELASTICSEARCH_VERIFY_CERTS=False -ELASTICSEARCH_CA_CERTS= -ELASTICSEARCH_REQUEST_TIMEOUT=100000 -ELASTICSEARCH_RETRY_ON_TIMEOUT=True -ELASTICSEARCH_MAX_RETRIES=10 - -# baidu vector configurations, only available when VECTOR_STORE is `baidu` -BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 -BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 -BAIDU_VECTOR_DB_ACCOUNT=root -BAIDU_VECTOR_DB_API_KEY=dify -BAIDU_VECTOR_DB_DATABASE=dify -BAIDU_VECTOR_DB_SHARD=1 -BAIDU_VECTOR_DB_REPLICAS=3 -BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER -BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE -BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT=500 -BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO=0.05 -BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS=300 - -# VikingDB configurations, only available when VECTOR_STORE is `vikingdb` -VIKINGDB_ACCESS_KEY=your-ak -VIKINGDB_SECRET_KEY=your-sk -VIKINGDB_REGION=cn-shanghai -VIKINGDB_HOST=api-vikingdb.xxx.volces.com -VIKINGDB_SCHEMA=http -VIKINGDB_CONNECTION_TIMEOUT=30 -VIKINGDB_SOCKET_TIMEOUT=30 - -# Lindorm configuration, only available when VECTOR_STORE is `lindorm` -LINDORM_URL=http://localhost:30070 -LINDORM_USERNAME=admin -LINDORM_PASSWORD=admin -LINDORM_USING_UGC=True -LINDORM_QUERY_TIMEOUT=1 - -# opengauss configurations, only available when VECTOR_STORE is `opengauss` -OPENGAUSS_HOST=opengauss -OPENGAUSS_PORT=6600 -OPENGAUSS_USER=postgres -OPENGAUSS_PASSWORD=Dify@123 -OPENGAUSS_DATABASE=dify -OPENGAUSS_MIN_CONNECTION=1 -OPENGAUSS_MAX_CONNECTION=5 -OPENGAUSS_ENABLE_PQ=false - -# huawei cloud search service vector configurations, only available when VECTOR_STORE is `huawei_cloud` -HUAWEI_CLOUD_HOSTS=https://127.0.0.1:9200 -HUAWEI_CLOUD_USER=admin -HUAWEI_CLOUD_PASSWORD=admin - -# Upstash Vector configuration, only available when VECTOR_STORE is `upstash` 
-UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io -UPSTASH_VECTOR_TOKEN=dify - -# TableStore Vector configuration -# (only used when VECTOR_STORE is tablestore) -TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com -TABLESTORE_INSTANCE_NAME=instance-name -TABLESTORE_ACCESS_KEY_ID=xxx -TABLESTORE_ACCESS_KEY_SECRET=xxx -TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false - -# Clickzetta configuration, only available when VECTOR_STORE is `clickzetta` -CLICKZETTA_USERNAME= -CLICKZETTA_PASSWORD= -CLICKZETTA_INSTANCE= -CLICKZETTA_SERVICE=api.clickzetta.com -CLICKZETTA_WORKSPACE=quick_start -CLICKZETTA_VCLUSTER=default_ap -CLICKZETTA_SCHEMA=dify -CLICKZETTA_BATCH_SIZE=100 -CLICKZETTA_ENABLE_INVERTED_INDEX=true -CLICKZETTA_ANALYZER_TYPE=chinese -CLICKZETTA_ANALYZER_MODE=smart -CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance - -# InterSystems IRIS configuration, only available when VECTOR_STORE is `iris` -IRIS_HOST=iris -IRIS_SUPER_SERVER_PORT=1972 -IRIS_WEB_SERVER_PORT=52773 -IRIS_USER=_SYSTEM -IRIS_PASSWORD=Dify@1234 -IRIS_DATABASE=USER -IRIS_SCHEMA=dify -IRIS_CONNECTION_URL= -IRIS_MIN_CONNECTION=1 -IRIS_MAX_CONNECTION=3 -IRIS_TEXT_INDEX=true -IRIS_TEXT_INDEX_LANGUAGE=en -IRIS_TIMEZONE=UTC - -# ------------------------------ -# Knowledge Configuration -# ------------------------------ - -# Upload file size limit, default 15M. -UPLOAD_FILE_SIZE_LIMIT=15 - -# The maximum number of files that can be uploaded at a time, default 5. -UPLOAD_FILE_BATCH_LIMIT=5 - -# Comma-separated list of file extensions blocked from upload for security reasons. -# Extensions should be lowercase without dots (e.g., exe,bat,sh,dll). -# Empty by default to allow all file types. -# Recommended: exe,bat,cmd,com,scr,vbs,ps1,msi,dll -UPLOAD_FILE_EXTENSION_BLACKLIST= - -# Maximum number of files allowed in a single chunk attachment, default 10. 
-SINGLE_CHUNK_ATTACHMENT_LIMIT=10 - -# Maximum number of files allowed in a image batch upload operation -IMAGE_FILE_BATCH_LIMIT=10 - -# Maximum allowed image file size for attachments in megabytes, default 2. -ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2 - -# Timeout for downloading image attachments in seconds, default 60. -ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60 - - -# ETL type, support: `dify`, `Unstructured` -# `dify` Dify's proprietary file extraction scheme -# `Unstructured` Unstructured.io file extraction scheme -ETL_TYPE=dify - -# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured -# Or using Unstructured for document extractor node for pptx. -# For example: http://unstructured:8000/general/v0/general -UNSTRUCTURED_API_URL= -UNSTRUCTURED_API_KEY= -SCARF_NO_ANALYTICS=true - -# ------------------------------ -# Model Configuration -# ------------------------------ - -# The maximum number of tokens allowed for prompt generation. -# This setting controls the upper limit of tokens that can be used by the LLM -# when generating a prompt in the prompt generation tool. -# Default: 512 tokens. -PROMPT_GENERATION_MAX_TOKENS=512 - -# The maximum number of tokens allowed for code generation. -# This setting controls the upper limit of tokens that can be used by the LLM -# when generating code in the code generation tool. -# Default: 1024 tokens. -CODE_GENERATION_MAX_TOKENS=1024 - -# Enable or disable plugin based token counting. If disabled, token counting will return 0. -# This can improve performance by skipping token counting operations. -# Default: false (disabled). -PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false - -# ------------------------------ -# Multi-modal Configuration -# ------------------------------ - -# The format of the image/video/audio/document sent when the multi-modal model is input, -# the default is base64, optional url. -# The delay of the call in url mode will be lower than that in base64 mode. 
-# It is generally recommended to use the more compatible base64 mode. -# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document. -MULTIMODAL_SEND_FORMAT=base64 -# Upload image file size limit, default 10M. -UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 -# Upload video file size limit, default 100M. -UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 -# Upload audio file size limit, default 50M. -UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 - -# ------------------------------ -# Sentry Configuration -# Used for application monitoring and error log tracking. -# ------------------------------ -SENTRY_DSN= - -# API Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -API_SENTRY_DSN= -# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%. -API_SENTRY_TRACES_SAMPLE_RATE=1.0 -# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%. -API_SENTRY_PROFILES_SAMPLE_RATE=1.0 - -# Web Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -WEB_SENTRY_DSN= - -# Plugin_daemon Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -PLUGIN_SENTRY_ENABLED=false -PLUGIN_SENTRY_DSN= - -# ------------------------------ -# Notion Integration Configuration -# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations -# ------------------------------ - -# Configure as "public" or "internal". -# Since Notion's OAuth redirect URL only supports HTTPS, -# if deploying locally, please use Notion's internal integration. 
-NOTION_INTEGRATION_TYPE=public -# Notion OAuth client secret (used for public integration type) -NOTION_CLIENT_SECRET= -# Notion OAuth client id (used for public integration type) -NOTION_CLIENT_ID= -# Notion internal integration secret. -# If the value of NOTION_INTEGRATION_TYPE is "internal", -# you need to configure this variable. -NOTION_INTERNAL_SECRET= - -# ------------------------------ -# Mail related configuration -# ------------------------------ - -# Mail type, support: resend, smtp, sendgrid -MAIL_TYPE=resend - -# Default send from email address, if not specified -# If using SendGrid, use the 'from' field for authentication if necessary. -MAIL_DEFAULT_SEND_FROM= - -# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`. -RESEND_API_URL=https://api.resend.com -RESEND_API_KEY=your-resend-api-key - - -# SMTP server configuration, used when MAIL_TYPE is `smtp` -SMTP_SERVER= -SMTP_PORT=465 -SMTP_USERNAME= -SMTP_PASSWORD= -SMTP_USE_TLS=true -SMTP_OPPORTUNISTIC_TLS=false -# Optional: override the local hostname used for SMTP HELO/EHLO -SMTP_LOCAL_HOSTNAME= - -# Sendgid configuration -SENDGRID_API_KEY= - -# ------------------------------ -# Others Configuration -# ------------------------------ - -# Maximum length of segmentation tokens for indexing -INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 - -# Member invitation link valid time (hours), -# Default: 72. -INVITE_EXPIRY_HOURS=72 - -# Reset password token valid time (minutes), -RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 -EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5 -CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5 -OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5 - -# The sandbox service endpoint. 
-CODE_EXECUTION_ENDPOINT=http://sandbox:8194 -CODE_EXECUTION_API_KEY=dify-sandbox -CODE_EXECUTION_SSL_VERIFY=True -CODE_EXECUTION_POOL_MAX_CONNECTIONS=100 -CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20 -CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0 -CODE_MAX_NUMBER=9223372036854775807 -CODE_MIN_NUMBER=-9223372036854775808 -CODE_MAX_DEPTH=5 -CODE_MAX_PRECISION=20 -CODE_MAX_STRING_LENGTH=400000 -CODE_MAX_STRING_ARRAY_LENGTH=30 -CODE_MAX_OBJECT_ARRAY_LENGTH=30 -CODE_MAX_NUMBER_ARRAY_LENGTH=1000 -CODE_EXECUTION_CONNECT_TIMEOUT=10 -CODE_EXECUTION_READ_TIMEOUT=60 -CODE_EXECUTION_WRITE_TIMEOUT=10 -TEMPLATE_TRANSFORM_MAX_LENGTH=400000 - -# Workflow runtime configuration -WORKFLOW_MAX_EXECUTION_STEPS=500 -WORKFLOW_MAX_EXECUTION_TIME=1200 -WORKFLOW_CALL_MAX_DEPTH=5 -MAX_VARIABLE_SIZE=204800 -WORKFLOW_FILE_UPLOAD_LIMIT=10 - -# GraphEngine Worker Pool Configuration -# Minimum number of workers per GraphEngine instance (default: 1) -GRAPH_ENGINE_MIN_WORKERS=1 -# Maximum number of workers per GraphEngine instance (default: 10) -GRAPH_ENGINE_MAX_WORKERS=10 -# Queue depth threshold that triggers worker scale up (default: 3) -GRAPH_ENGINE_SCALE_UP_THRESHOLD=3 -# Seconds of idle time before scaling down workers (default: 5.0) -GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0 - -# Workflow storage configuration -# Options: rdbms, hybrid -# rdbms: Use only the relational database (default) -# hybrid: Save new data to object storage, read from both object storage and RDBMS -WORKFLOW_NODE_EXECUTION_STORAGE=rdbms - -# Repository configuration -# Core workflow execution repository implementation -# Options: -# - core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository (default) -# - core.repositories.celery_workflow_execution_repository.CeleryWorkflowExecutionRepository -# - extensions.logstore.repositories.logstore_workflow_execution_repository.LogstoreWorkflowExecutionRepository 
-CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository - -# Core workflow node execution repository implementation -# Options: -# - core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository (default) -# - core.repositories.celery_workflow_node_execution_repository.CeleryWorkflowNodeExecutionRepository -# - extensions.logstore.repositories.logstore_workflow_node_execution_repository.LogstoreWorkflowNodeExecutionRepository -CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository - -# API workflow run repository implementation -# Options: -# - repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository (default) -# - extensions.logstore.repositories.logstore_api_workflow_run_repository.LogstoreAPIWorkflowRunRepository -API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository - -# API workflow node execution repository implementation -# Options: -# - repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository (default) -# - extensions.logstore.repositories.logstore_api_workflow_node_execution_repository.LogstoreAPIWorkflowNodeExecutionRepository -API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository - -# Workflow log cleanup configuration -# Enable automatic cleanup of workflow run logs to manage database size -WORKFLOW_LOG_CLEANUP_ENABLED=false -# Number of days to retain workflow run logs (default: 30 days) -WORKFLOW_LOG_RETENTION_DAYS=30 -# Batch size for workflow log cleanup operations (default: 100) -WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100 -# Comma-separated list of workflow IDs to clean logs for 
-WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS= - -# Aliyun SLS Logstore Configuration -# Aliyun Access Key ID -ALIYUN_SLS_ACCESS_KEY_ID= -# Aliyun Access Key Secret -ALIYUN_SLS_ACCESS_KEY_SECRET= -# Aliyun SLS Endpoint (e.g., cn-hangzhou.log.aliyuncs.com) -ALIYUN_SLS_ENDPOINT= -# Aliyun SLS Region (e.g., cn-hangzhou) -ALIYUN_SLS_REGION= -# Aliyun SLS Project Name -ALIYUN_SLS_PROJECT_NAME= -# Number of days to retain workflow run logs (default: 365 days, 3650 for permanent storage) -ALIYUN_SLS_LOGSTORE_TTL=365 -# Enable dual-write to both SLS LogStore and SQL database (default: false) -LOGSTORE_DUAL_WRITE_ENABLED=false -# Enable dual-read fallback to SQL database when LogStore returns no results (default: true) -# Useful for migration scenarios where historical data exists only in SQL database -LOGSTORE_DUAL_READ_ENABLED=true -# Control flag for whether to write the `graph` field to LogStore. -# If LOGSTORE_ENABLE_PUT_GRAPH_FIELD is "true", write the full `graph` field; -# otherwise write an empty {} instead. Defaults to writing the `graph` field. -LOGSTORE_ENABLE_PUT_GRAPH_FIELD=true - -# HTTP request node in workflow configuration -HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 -HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 -HTTP_REQUEST_NODE_SSL_VERIFY=True - -# HTTP request node timeout configuration -# Maximum timeout values (in seconds) that users can set in HTTP request nodes -# - Connect timeout: Time to wait for establishing connection (default: 10s) -# - Read timeout: Time to wait for receiving response data (default: 600s, 10 minutes) -# - Write timeout: Time to wait for sending request data (default: 600s, 10 minutes) -HTTP_REQUEST_MAX_CONNECT_TIMEOUT=10 -HTTP_REQUEST_MAX_READ_TIMEOUT=600 -HTTP_REQUEST_MAX_WRITE_TIMEOUT=600 - -# Base64 encoded CA certificate data for custom certificate verification (PEM format, optional) -# HTTP_REQUEST_NODE_SSL_CERT_DATA=LS0tLS1CRUdJTi... 
-# Base64 encoded client certificate data for mutual TLS authentication (PEM format, optional) -# HTTP_REQUEST_NODE_SSL_CLIENT_CERT_DATA=LS0tLS1CRUdJTi... -# Base64 encoded client private key data for mutual TLS authentication (PEM format, optional) -# HTTP_REQUEST_NODE_SSL_CLIENT_KEY_DATA=LS0tLS1CRUdJTi... - -# Webhook request configuration -WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760 - -# Respect X-* headers to redirect clients -RESPECT_XFORWARD_HEADERS_ENABLED=false - -# SSRF Proxy server HTTP URL -SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 -# SSRF Proxy server HTTPS URL -SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 - -# Maximum loop count in the workflow -LOOP_NODE_MAX_COUNT=100 - -# The maximum number of tools that can be used in the agent. -MAX_TOOLS_NUM=10 - -# Maximum number of Parallelism branches in the workflow -MAX_PARALLEL_LIMIT=10 - -# The maximum number of iterations for agent setting -MAX_ITERATIONS_NUM=99 - -# ------------------------------ -# Environment Variables for web Service -# ------------------------------ - -# The timeout for the text generation in millisecond -TEXT_GENERATION_TIMEOUT_MS=60000 - -# Enable the experimental vinext runtime shipped in the image. -EXPERIMENTAL_ENABLE_VINEXT=false - -# Allow inline style attributes in Markdown rendering. -# Enable this if your workflows use Jinja2 templates with styled HTML. -# Only recommended for self-hosted deployments with trusted content. -ALLOW_INLINE_STYLES=false - -# Allow rendering unsafe URLs which have "data:" scheme. 
-ALLOW_UNSAFE_DATA_SCHEME=false - -# Maximum number of tree depth in the workflow -MAX_TREE_DEPTH=50 - -# ------------------------------ -# Environment Variables for database Service -# ------------------------------ -# Postgres data directory -PGDATA=/var/lib/postgresql/data/pgdata - -# MySQL Default Configuration -MYSQL_HOST_VOLUME=./volumes/mysql/data - -# ------------------------------ -# Environment Variables for sandbox Service -# ------------------------------ - -# The API key for the sandbox service -SANDBOX_API_KEY=dify-sandbox -# The mode in which the Gin framework runs -SANDBOX_GIN_MODE=release -# The timeout for the worker in seconds -SANDBOX_WORKER_TIMEOUT=15 -# Enable network for the sandbox service -SANDBOX_ENABLE_NETWORK=true -# HTTP proxy URL for SSRF protection -SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 -# HTTPS proxy URL for SSRF protection -SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 -# The port on which the sandbox service runs -SANDBOX_PORT=8194 - -# ------------------------------ -# Environment Variables for weaviate Service -# (only used when VECTOR_STORE is weaviate) -# ------------------------------ WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate WEAVIATE_QUERY_DEFAULTS_LIMIT=25 WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true @@ -1259,118 +167,26 @@ WEAVIATE_ENABLE_TOKENIZER_GSE=false WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA=false WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR=false -# ------------------------------ -# Environment Variables for Chroma -# (only used when VECTOR_STORE is chroma) -# ------------------------------ - -# Authentication credentials for Chroma server -CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456 -# Authentication provider for Chroma server -CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider -# Persistence setting for Chroma server -CHROMA_IS_PERSISTENT=TRUE - -# ------------------------------ -# Environment Variables for Oracle Service -# (only used when VECTOR_STORE is oracle) -# 
------------------------------ -ORACLE_PWD=Dify123456 -ORACLE_CHARACTERSET=AL32UTF8 - -# ------------------------------ -# Environment Variables for milvus Service -# (only used when VECTOR_STORE is milvus) -# ------------------------------ -# ETCD configuration for auto compaction mode -ETCD_AUTO_COMPACTION_MODE=revision -# ETCD configuration for auto compaction retention in terms of number of revisions -ETCD_AUTO_COMPACTION_RETENTION=1000 -# ETCD configuration for backend quota in bytes -ETCD_QUOTA_BACKEND_BYTES=4294967296 -# ETCD configuration for the number of changes before triggering a snapshot -ETCD_SNAPSHOT_COUNT=50000 -# MinIO access key for authentication -MINIO_ACCESS_KEY=minioadmin -# MinIO secret key for authentication -MINIO_SECRET_KEY=minioadmin -# ETCD service endpoints -ETCD_ENDPOINTS=etcd:2379 -# MinIO service address -MINIO_ADDRESS=minio:9000 -# Enable or disable security authorization -MILVUS_AUTHORIZATION_ENABLED=true - -# ------------------------------ -# Environment Variables for pgvector / pgvector-rs Service -# (only used when VECTOR_STORE is pgvector / pgvector-rs) -# ------------------------------ -PGVECTOR_PGUSER=postgres -# The password for the default postgres user. -PGVECTOR_POSTGRES_PASSWORD=difyai123456 -# The name of the default postgres database. 
-PGVECTOR_POSTGRES_DB=dify -# postgres data directory -PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata - -# ------------------------------ -# Environment Variables for opensearch -# (only used when VECTOR_STORE is opensearch) -# ------------------------------ -OPENSEARCH_DISCOVERY_TYPE=single-node -OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true -OPENSEARCH_JAVA_OPTS_MIN=512m -OPENSEARCH_JAVA_OPTS_MAX=1024m -OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 -OPENSEARCH_MEMLOCK_SOFT=-1 -OPENSEARCH_MEMLOCK_HARD=-1 -OPENSEARCH_NOFILE_SOFT=65536 -OPENSEARCH_NOFILE_HARD=65536 - -# ------------------------------ -# Environment Variables for Nginx reverse proxy -# ------------------------------ -NGINX_SERVER_NAME=_ -NGINX_HTTPS_ENABLED=false -# HTTP port -NGINX_PORT=80 -# SSL settings are only applied when HTTPS_ENABLED is true -NGINX_SSL_PORT=443 -# if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory -# and modify the env vars below accordingly. 
-NGINX_SSL_CERT_FILENAME=dify.crt -NGINX_SSL_CERT_KEY_FILENAME=dify.key -NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3 - -# Nginx performance tuning -NGINX_WORKER_PROCESSES=auto -NGINX_CLIENT_MAX_BODY_SIZE=100M -NGINX_KEEPALIVE_TIMEOUT=65 - -# Proxy settings -NGINX_PROXY_READ_TIMEOUT=3600s -NGINX_PROXY_SEND_TIMEOUT=3600s - -# Set true to accept requests for /.well-known/acme-challenge/ -NGINX_ENABLE_CERTBOT_CHALLENGE=false - -# ------------------------------ -# Certbot Configuration -# ------------------------------ - -# Email address (required to get certificates from Let's Encrypt) -CERTBOT_EMAIL=your_email@example.com - -# Domain name -CERTBOT_DOMAIN=your_domain.com - -# certbot command options -# i.e: --force-renewal --dry-run --test-cert --debug -CERTBOT_OPTIONS= - -# ------------------------------ -# Environment Variables for SSRF Proxy -# ------------------------------ +# Sandbox and SSRF proxy +CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox +CODE_EXECUTION_SSL_VERIFY=True +CODE_EXECUTION_POOL_MAX_CONNECTIONS=100 +CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0 +CODE_EXECUTION_CONNECT_TIMEOUT=10 +CODE_EXECUTION_READ_TIMEOUT=60 +CODE_EXECUTION_WRITE_TIMEOUT=10 +SANDBOX_API_KEY=dify-sandbox +SANDBOX_GIN_MODE=release +SANDBOX_WORKER_TIMEOUT=15 +SANDBOX_ENABLE_NETWORK=true +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +SANDBOX_PORT=8194 +PIP_MIRROR_URL= +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 SSRF_HTTP_PORT=3128 SSRF_COREDUMP_DIR=/var/spool/squid SSRF_REVERSE_PROXY_PORT=8194 @@ -1383,67 +199,7 @@ SSRF_POOL_MAX_CONNECTIONS=100 SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20 SSRF_POOL_KEEPALIVE_EXPIRY=5.0 -# ------------------------------ -# docker env var for specifying vector db and metadata db type at startup -# (based on the vector db and metadata db type, the corresponding docker -# compose profile will be used) 
-# if you want to use unstructured, add ',unstructured' to the end -# ------------------------------ -COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql} - -# ------------------------------ -# Worker health check configuration for worker and worker_beat services. -# Set to false to enable the health check. -# Note: enabling the health check may cause periodic CPU spikes and increased load, -# as it establishes a broker connection and sends a Celery ping on every check interval. -# ------------------------------ -COMPOSE_WORKER_HEALTHCHECK_DISABLED=true -# Interval between health checks (e.g. 30s, 1m) -COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s -# Timeout for each health check (e.g. 30s, 1m) -COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s - -# ------------------------------ -# Docker Compose Service Expose Host Port Configurations -# ------------------------------ -EXPOSE_NGINX_PORT=80 -EXPOSE_NGINX_SSL_PORT=443 - -# ---------------------------------------------------------------------------- -# ModelProvider & Tool Position Configuration -# Used to specify the model providers and tools that can be used in the app. -# ---------------------------------------------------------------------------- - -# Pin, include, and exclude tools -# Use comma-separated values with no spaces between items. -# Example: POSITION_TOOL_PINS=bing,google -POSITION_TOOL_PINS= -POSITION_TOOL_INCLUDES= -POSITION_TOOL_EXCLUDES= - -# Pin, include, and exclude model providers -# Use comma-separated values with no spaces between items. -# Example: POSITION_PROVIDER_PINS=openai,openllm -POSITION_PROVIDER_PINS= -POSITION_PROVIDER_INCLUDES= -POSITION_PROVIDER_EXCLUDES= - -# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP -CSP_WHITELIST= - -# Enable or disable create tidb service job -CREATE_TIDB_SERVICE_JOB_ENABLED=false - -# Maximum number of submitted thread count in a ThreadPool for parallel node execution -MAX_SUBMIT_COUNT=100 - -# The maximum number of top-k value for RAG. 
-TOP_K_MAX_VALUE=10 - -# ------------------------------ -# Plugin Daemon Configuration -# ------------------------------ - +# Plugin daemon DB_PLUGIN_DATABASE=dify_plugin EXPOSE_PLUGIN_DAEMON_PORT=5002 PLUGIN_DAEMON_PORT=5002 @@ -1452,180 +208,44 @@ PLUGIN_DAEMON_URL=http://plugin_daemon:5002 PLUGIN_MAX_PACKAGE_SIZE=52428800 PLUGIN_MODEL_SCHEMA_CACHE_TTL=3600 PLUGIN_PPROF_ENABLED=false - PLUGIN_DEBUGGING_HOST=0.0.0.0 PLUGIN_DEBUGGING_PORT=5003 EXPOSE_PLUGIN_DEBUGGING_HOST=localhost EXPOSE_PLUGIN_DEBUGGING_PORT=5003 - -# If this key is changed, DIFY_INNER_API_KEY in plugin_daemon service must also be updated or agent node will fail. PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 PLUGIN_DIFY_INNER_API_URL=http://api:5001 - -ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} - -MARKETPLACE_ENABLED=true -MARKETPLACE_API_URL=https://marketplace.dify.ai - -# Creators Platform configuration -CREATORS_PLATFORM_FEATURES_ENABLED=true -CREATORS_PLATFORM_API_URL=https://creators.dify.ai -CREATORS_PLATFORM_OAUTH_CLIENT_ID= - FORCE_VERIFYING_SIGNATURE=true -ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES=true - PLUGIN_STDIO_BUFFER_SIZE=1024 PLUGIN_STDIO_MAX_BUFFER_SIZE=5242880 - PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120 -# Plugin Daemon side timeout (configure to match the API side below) PLUGIN_MAX_EXECUTION_TIMEOUT=600 -# API side timeout (configure to match the Plugin Daemon side above) -PLUGIN_DAEMON_TIMEOUT=600.0 -# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple -PIP_MIRROR_URL= - -# https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example -# Plugin storage type, local aws_s3 tencent_cos azure_blob aliyun_oss volcengine_tos PLUGIN_STORAGE_TYPE=local PLUGIN_STORAGE_LOCAL_ROOT=/app/storage PLUGIN_WORKING_PATH=/app/storage/cwd PLUGIN_INSTALLED_PATH=plugin PLUGIN_PACKAGE_CACHE_PATH=plugin_packages PLUGIN_MEDIA_CACHE_PATH=assets -# Plugin oss bucket PLUGIN_STORAGE_OSS_BUCKET= -# Plugin oss s3 credentials -PLUGIN_S3_USE_AWS=false 
-PLUGIN_S3_USE_AWS_MANAGED_IAM=false -PLUGIN_S3_ENDPOINT= -PLUGIN_S3_USE_PATH_STYLE=false -PLUGIN_AWS_ACCESS_KEY= -PLUGIN_AWS_SECRET_KEY= -PLUGIN_AWS_REGION= -# Plugin oss azure blob -PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME= -PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING= -# Plugin oss tencent cos -PLUGIN_TENCENT_COS_SECRET_KEY= -PLUGIN_TENCENT_COS_SECRET_ID= -PLUGIN_TENCENT_COS_REGION= -# Plugin oss aliyun oss -PLUGIN_ALIYUN_OSS_REGION= -PLUGIN_ALIYUN_OSS_ENDPOINT= -PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID= -PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET= -PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4 -PLUGIN_ALIYUN_OSS_PATH= -# Plugin oss volcengine tos -PLUGIN_VOLCENGINE_TOS_ENDPOINT= -PLUGIN_VOLCENGINE_TOS_ACCESS_KEY= -PLUGIN_VOLCENGINE_TOS_SECRET_KEY= -PLUGIN_VOLCENGINE_TOS_REGION= +PLUGIN_SENTRY_ENABLED=false +PLUGIN_SENTRY_DSN= +MARKETPLACE_ENABLED=true +MARKETPLACE_API_URL=https://marketplace.dify.ai +MARKETPLACE_URL= -# ------------------------------ -# OTLP Collector Configuration -# ------------------------------ -ENABLE_OTEL=false -OTLP_TRACE_ENDPOINT= -OTLP_METRIC_ENDPOINT= -OTLP_BASE_ENDPOINT=http://localhost:4318 -OTLP_API_KEY= -OTEL_EXPORTER_OTLP_PROTOCOL= -OTEL_EXPORTER_TYPE=otlp -OTEL_SAMPLING_RATE=0.1 -OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000 -OTEL_MAX_QUEUE_SIZE=2048 -OTEL_MAX_EXPORT_BATCH_SIZE=512 -OTEL_METRIC_EXPORT_INTERVAL=60000 -OTEL_BATCH_EXPORT_TIMEOUT=10000 -OTEL_METRIC_EXPORT_TIMEOUT=30000 - -# Prevent Clickjacking -ALLOW_EMBED=false - -# Dataset queue monitor configuration -QUEUE_MONITOR_THRESHOLD=200 -# You can configure multiple ones, separated by commas. 
eg: test1@dify.ai,test2@dify.ai -QUEUE_MONITOR_ALERT_EMAILS= -# Monitor interval in minutes, default is 30 minutes -QUEUE_MONITOR_INTERVAL=30 - -# Swagger UI configuration -SWAGGER_UI_ENABLED=false -SWAGGER_UI_PATH=/swagger-ui.html - -# Whether to encrypt dataset IDs when exporting DSL files (default: true) -# Set to false to export dataset IDs as plain text for easier cross-environment import -DSL_EXPORT_ENCRYPT_DATASET_ID=true - -# Maximum number of segments for dataset segments API (0 for unlimited) -DATASET_MAX_SEGMENTS_PER_REQUEST=0 - -# Celery schedule tasks configuration -ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false -ENABLE_CLEAN_UNUSED_DATASETS_TASK=false -ENABLE_CREATE_TIDB_SERVERLESS_TASK=false -ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false -ENABLE_CLEAN_MESSAGES=false -ENABLE_WORKFLOW_RUN_CLEANUP_TASK=false -ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false -ENABLE_DATASETS_QUEUE_MONITOR=false -ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true -ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK=true -WORKFLOW_SCHEDULE_POLLER_INTERVAL=1 -WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE=100 -WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0 - -# Tenant isolated task queue configuration -TENANT_ISOLATED_TASK_CONCURRENCY=1 - -# Maximum allowed CSV file size for annotation import in megabytes -ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2 -#Maximum number of annotation records allowed in a single import -ANNOTATION_IMPORT_MAX_RECORDS=10000 -# Minimum number of annotation records required in a single import -ANNOTATION_IMPORT_MIN_RECORDS=1 -ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5 -ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20 -# Maximum number of concurrent annotation import tasks per tenant -ANNOTATION_IMPORT_MAX_CONCURRENT=5 - -# The API key of amplitude -AMPLITUDE_API_KEY= - -# Sandbox expired records clean configuration -SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21 -SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000 -SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL=200 -SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30 - - -# 
Redis URL used for event bus between API and -# celery worker -# defaults to url constructed from `REDIS_*` -# configurations -EVENT_BUS_REDIS_URL= -# Event transport type. Options are: -# -# - pubsub: normal Pub/Sub (at-most-once) -# - sharded: sharded Pub/Sub (at-most-once) -# - streams: Redis Streams (at-least-once, recommended to avoid subscriber races) -# -# Note: Before enabling 'streams' in production, estimate your expected event volume and retention needs. -# Configure Redis memory limits and stream trimming appropriately (e.g., MAXLEN and key expiry) to reduce -# the risk of data loss from Redis auto-eviction under memory pressure. -# Also accepts ENV: EVENT_BUS_REDIS_CHANNEL_TYPE. -EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub -# Whether to use Redis cluster mode while use redis as event bus. -# It's highly recommended to enable this for large deployments. -EVENT_BUS_REDIS_USE_CLUSTERS=false - -# Whether to Enable human input timeout check task -ENABLE_HUMAN_INPUT_TIMEOUT_TASK=true -# Human input timeout check interval in minutes -HUMAN_INPUT_TIMEOUT_TASK_INTERVAL=1 - - -SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL=90000 +# Nginx and Docker Compose +NGINX_SERVER_NAME=_ +NGINX_HTTPS_ENABLED=false +NGINX_PORT=80 +NGINX_SSL_PORT=443 +NGINX_SSL_CERT_FILENAME=dify.crt +NGINX_SSL_CERT_KEY_FILENAME=dify.key +NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3 +NGINX_WORKER_PROCESSES=auto +NGINX_CLIENT_MAX_BODY_SIZE=100M +NGINX_KEEPALIVE_TIMEOUT=65 +NGINX_PROXY_READ_TIMEOUT=3600s +NGINX_PROXY_SEND_TIMEOUT=3600s +NGINX_ENABLE_CERTBOT_CHALLENGE=false +EXPOSE_NGINX_PORT=80 +EXPOSE_NGINX_SSL_PORT=443 +COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql} diff --git a/docker/.gitignore b/docker/.gitignore new file mode 100644 index 0000000000..c3a47ad592 --- /dev/null +++ b/docker/.gitignore @@ -0,0 +1,3 @@ +# Ignore actual .env files (keep only .env.example files in git) +*.env +!*.env.example diff --git a/docker/README.md b/docker/README.md index 3130fa9886..a2d9b2eeba 100644 --- 
a/docker/README.md +++ b/docker/README.md @@ -7,29 +7,31 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T - **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.\ For more information, refer `docker/certbot/README.md`. -- **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments. +- **Persistent Environment Variables**: Essential startup defaults are provided in `.env.example`, while local values are stored in `.env`, ensuring that your configurations persist across deployments. > What is `.env`?

- > The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments. + > The `.env` file is the local startup file. Copy it from `.env.example` for a default deployment. Optional advanced settings live in `envs/*.env.example` files. - **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file. -- **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades. - ### How to Deploy Dify with `docker-compose.yaml` 1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system. 1. **Environment Setup**: - Navigate to the `docker` directory. - - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`. - - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options. - - **Optional (Recommended for upgrades)**: - You may use the environment synchronization tool to help keep your `.env` file aligned with the latest `.env.example` updates, while preserving your custom settings. - This is especially useful when upgrading Dify or managing a large, customized `.env` file. + - Copy `.env.example` to `.env`. + - Customize `.env` when you need to change essential startup defaults. Copy optional files from `envs/` without the `.example` suffix when you need advanced settings. 
+ - **Optional (for advanced deployments)**: + If you maintain a full `.env` file copied from `.env.example`, you may use the environment synchronization tool to keep it aligned with the latest `.env.example` updates while preserving your custom settings. See the [Environment Variables Synchronization](#environment-variables-synchronization) section below. 1. **Running the Services**: - - Execute `docker compose up` from the `docker` directory to start the services. + - Execute `docker compose up -d` from the `docker` directory to start the services. - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`. + ```bash + cp .env.example .env + docker compose up -d + ``` + 1. **SSL Certificate Setup**: - Refer `docker/certbot/README.md` to set up SSL certificates using Certbot. 1. **OpenTelemetry Collector Setup**: @@ -41,7 +43,7 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T 1. **Middleware Setup**: - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches. - Navigate to the `docker` directory. - - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file). + - Ensure the `middleware.env` file is created by running `cp envs/middleware.env.example middleware.env` (refer to the `envs/middleware.env.example` file). 1. **Running Middleware Services**: - Navigate to the `docker` directory. - Execute `docker compose --env-file middleware.env -f docker-compose.middleware.yaml -p dify up -d` to start PostgreSQL/MySQL (per `DB_TYPE`) plus the bundled Weaviate instance. @@ -58,7 +60,13 @@ For users migrating from the `docker-legacy` setup: 1. 
**Data Migration**: - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary. -### Overview of `.env` +### Overview of `.env`, `.env.example`, and `envs/` + +- `.env.example` contains the essential default configuration for Docker Compose deployments. +- `.env` contains local startup values copied from `.env.example` and any local changes. +- `envs/*.env.example` files contain optional advanced configuration grouped by theme. + +Docker Compose reads `envs/*.env` files when present, then reads `.env` last so values in `.env` take precedence. #### Key Modules and Customization @@ -68,7 +76,7 @@ For users migrating from the `docker-legacy` setup: #### Other notable variables -The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables: +The root `.env.example` file contains the essential startup settings. Optional and provider-specific settings are grouped in `envs/*.env.example` files. Here are some of the key sections and variables: 1. **Common Variables**: @@ -96,7 +104,7 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w 1. **Storage Configuration**: - - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc. + - `STORAGE_TYPE`, `OPENDAL_SCHEME`, `OPENDAL_FS_ROOT`: Default local file storage settings. Optional storage backends are configured from the files under `envs/`. 1. **Vector Database Configuration**: @@ -118,9 +126,11 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w ### Environment Variables Synchronization -When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.example`. 
+When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.example` or the optional files under `envs/`. -To help keep your existing `.env` file up to date **without losing your custom values**, an optional environment variables synchronization tool is provided. +If you use the default workflow, review `.env.example` and keep your `.env` aligned with essential startup values. + +If you maintain a customized `.env` file copied from `.env.example`, an optional environment variables synchronization tool is provided. > This tool performs a **one-way synchronization** from `.env.example` to `.env`. > Existing values in `.env` are never overwritten automatically. @@ -143,9 +153,9 @@ Before synchronization, the current `.env` file is saved to the `env-backup/` di **When to use** -- After upgrading Dify to a newer version +- After upgrading Dify to a newer version with a full `.env` file - When `.env.example` has been updated with new environment variables -- When managing a large or heavily customized `.env` file +- When managing a large or heavily customized `.env` file copied from `.env.example` **Usage** diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index 87fa01f671..0f65c38098 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -1,4 +1,202 @@ -x-shared-env: &shared-api-worker-env +# Shared configuration using YAML anchors and env_file +x-shared-api-worker-config: &shared-api-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/api.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + 
required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-config: &shared-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: 
./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-beat-config: &shared-worker-beat-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker-beat.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env 
+ required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + services: # Init container to fix permissions init_permissions: @@ -21,12 +219,9 @@ services: # API service api: + <<: *shared-api-worker-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'api' starts the API server. MODE: api SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -69,12 +264,9 @@ services: # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: + <<: *shared-worker-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. 
- <<: *shared-api-worker-env - # Startup mode, 'worker' starts the Celery worker for processing all queues. MODE: worker SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -115,12 +307,9 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: + <<: *shared-worker-beat-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks. MODE: beat depends_on: init_permissions: @@ -154,6 +343,12 @@ services: web: image: langgenius/dify-web:1.14.0 restart: always + env_file: + - path: ./envs/core-services/web.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} APP_API_URL: ${APP_API_URL:-} @@ -170,8 +365,8 @@ services: ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false} MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} - TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} - INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} @@ -228,7 +423,7 @@ services: MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456} MYSQL_DATABASE: ${DB_DATABASE:-dify} command: > - --max_connections=1000 + --max_connections=${MYSQL_MAX_CONNECTIONS:-1000} --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M} --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} @@ -270,6 +465,12 @@ services: sandbox: image: 
langgenius/dify-sandbox:0.2.15 restart: always + env_file: + - path: ./envs/core-services/sandbox.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: # The DifySandbox configurations # Make sure you are changing this key for your deployment with a strong key. @@ -294,9 +495,24 @@ services: plugin_daemon: image: langgenius/dify-plugin-daemon:0.6.0-local restart: always + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/plugin-daemon.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default environment: - # Use the shared environment variables. - <<: *shared-api-worker-env DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} DB_SSL_MODE: ${DB_SSL_MODE:-disable} SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} @@ -402,8 +618,8 @@ services: - ./certbot/update-cert.template.txt:/update-cert.template.txt - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh environment: - - CERTBOT_EMAIL=${CERTBOT_EMAIL} - - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} + - CERTBOT_EMAIL=${CERTBOT_EMAIL:-} + - CERTBOT_DOMAIN=${CERTBOT_DOMAIN:-} - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} entrypoint: ["/docker-entrypoint.sh"] command: ["tail", "-f", "/dev/null"] diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml index 23c26c6695..0ad406a63b 100644 --- a/docker/docker-compose.middleware.yaml +++ b/docker/docker-compose.middleware.yaml @@ -51,7 +51,7 @@ services: MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456} MYSQL_DATABASE: ${DB_DATABASE:-dify} command: > - --max_connections=1000 + --max_connections=${MYSQL_MAX_CONNECTIONS:-1000} --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} 
--innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M} --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index a72136049d..0f8458a58f 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -4,724 +4,204 @@ # or docker-compose-template.yaml and regenerate this file. # ================================================================== -x-shared-env: &shared-api-worker-env - CONSOLE_API_URL: ${CONSOLE_API_URL:-} - CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-} - SERVICE_API_URL: ${SERVICE_API_URL:-} - TRIGGER_URL: ${TRIGGER_URL:-http://localhost} - APP_API_URL: ${APP_API_URL:-} - APP_WEB_URL: ${APP_WEB_URL:-} - FILES_URL: ${FILES_URL:-} - INTERNAL_FILES_URL: ${INTERNAL_FILES_URL:-} - LANG: ${LANG:-C.UTF-8} - LC_ALL: ${LC_ALL:-C.UTF-8} - PYTHONIOENCODING: ${PYTHONIOENCODING:-utf-8} - UV_CACHE_DIR: ${UV_CACHE_DIR:-/tmp/.uv-cache} - LOG_LEVEL: ${LOG_LEVEL:-INFO} - LOG_OUTPUT_FORMAT: ${LOG_OUTPUT_FORMAT:-text} - LOG_FILE: ${LOG_FILE:-/app/logs/server.log} - LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} - LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5} - LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S} - LOG_TZ: ${LOG_TZ:-UTC} - DEBUG: ${DEBUG:-false} - FLASK_DEBUG: ${FLASK_DEBUG:-false} - ENABLE_REQUEST_LOGGING: ${ENABLE_REQUEST_LOGGING:-False} - SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U} - INIT_PASSWORD: ${INIT_PASSWORD:-} - DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION} - CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai} - OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1} - MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true} - FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300} - ENABLE_COLLABORATION_MODE: ${ENABLE_COLLABORATION_MODE:-false} - ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60} - REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30} - APP_DEFAULT_ACTIVE_REQUESTS: 
${APP_DEFAULT_ACTIVE_REQUESTS:-0} - APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0} - APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200} - DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0} - DIFY_PORT: ${DIFY_PORT:-5001} - SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1} - SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent} - SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10} - CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-} - GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360} - CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-4} - CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false} - CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-} - CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-} - API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10} - API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60} - ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true} - ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true} - ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true} - NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false} - DB_TYPE: ${DB_TYPE:-postgresql} - DB_USERNAME: ${DB_USERNAME:-postgres} - DB_PASSWORD: ${DB_PASSWORD:-difyai123456} - DB_HOST: ${DB_HOST:-db_postgres} - DB_PORT: ${DB_PORT:-5432} - DB_DATABASE: ${DB_DATABASE:-dify} - SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30} - SQLALCHEMY_MAX_OVERFLOW: ${SQLALCHEMY_MAX_OVERFLOW:-10} - SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600} - SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false} - SQLALCHEMY_POOL_PRE_PING: ${SQLALCHEMY_POOL_PRE_PING:-false} - SQLALCHEMY_POOL_USE_LIFO: ${SQLALCHEMY_POOL_USE_LIFO:-false} - SQLALCHEMY_POOL_TIMEOUT: ${SQLALCHEMY_POOL_TIMEOUT:-30} - POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-200} - POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB} - POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB} - POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB} - 
POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB} - POSTGRES_STATEMENT_TIMEOUT: ${POSTGRES_STATEMENT_TIMEOUT:-0} - POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: ${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0} - MYSQL_MAX_CONNECTIONS: ${MYSQL_MAX_CONNECTIONS:-1000} - MYSQL_INNODB_BUFFER_POOL_SIZE: ${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} - MYSQL_INNODB_LOG_FILE_SIZE: ${MYSQL_INNODB_LOG_FILE_SIZE:-128M} - MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT: ${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} - REDIS_HOST: ${REDIS_HOST:-redis} - REDIS_PORT: ${REDIS_PORT:-6379} - REDIS_USERNAME: ${REDIS_USERNAME:-} - REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456} - REDIS_USE_SSL: ${REDIS_USE_SSL:-false} - REDIS_SSL_CERT_REQS: ${REDIS_SSL_CERT_REQS:-CERT_NONE} - REDIS_SSL_CA_CERTS: ${REDIS_SSL_CA_CERTS:-} - REDIS_SSL_CERTFILE: ${REDIS_SSL_CERTFILE:-} - REDIS_SSL_KEYFILE: ${REDIS_SSL_KEYFILE:-} - REDIS_DB: ${REDIS_DB:-0} - REDIS_KEY_PREFIX: ${REDIS_KEY_PREFIX:-} - REDIS_MAX_CONNECTIONS: ${REDIS_MAX_CONNECTIONS:-} - REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false} - REDIS_SENTINELS: ${REDIS_SENTINELS:-} - REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-} - REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-} - REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-} - REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1} - REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false} - REDIS_CLUSTERS: ${REDIS_CLUSTERS:-} - REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-} - REDIS_RETRY_RETRIES: ${REDIS_RETRY_RETRIES:-3} - REDIS_RETRY_BACKOFF_BASE: ${REDIS_RETRY_BACKOFF_BASE:-1.0} - REDIS_RETRY_BACKOFF_CAP: ${REDIS_RETRY_BACKOFF_CAP:-10.0} - REDIS_SOCKET_TIMEOUT: ${REDIS_SOCKET_TIMEOUT:-5.0} - REDIS_SOCKET_CONNECT_TIMEOUT: ${REDIS_SOCKET_CONNECT_TIMEOUT:-5.0} - REDIS_HEALTH_CHECK_INTERVAL: ${REDIS_HEALTH_CHECK_INTERVAL:-30} - CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1} - CELERY_BACKEND: ${CELERY_BACKEND:-redis} - 
BROKER_USE_SSL: ${BROKER_USE_SSL:-false} - CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false} - CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-} - CELERY_SENTINEL_PASSWORD: ${CELERY_SENTINEL_PASSWORD:-} - CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1} - CELERY_TASK_ANNOTATIONS: ${CELERY_TASK_ANNOTATIONS:-null} - WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*} - CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} - COOKIE_DOMAIN: ${COOKIE_DOMAIN:-} - NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-} - NEXT_PUBLIC_SOCKET_URL: ${NEXT_PUBLIC_SOCKET_URL:-ws://localhost} - NEXT_PUBLIC_BATCH_CONCURRENCY: ${NEXT_PUBLIC_BATCH_CONCURRENCY:-5} - STORAGE_TYPE: ${STORAGE_TYPE:-opendal} - OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} - OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} - CLICKZETTA_VOLUME_TYPE: ${CLICKZETTA_VOLUME_TYPE:-user} - CLICKZETTA_VOLUME_NAME: ${CLICKZETTA_VOLUME_NAME:-} - CLICKZETTA_VOLUME_TABLE_PREFIX: ${CLICKZETTA_VOLUME_TABLE_PREFIX:-dataset_} - CLICKZETTA_VOLUME_DIFY_PREFIX: ${CLICKZETTA_VOLUME_DIFY_PREFIX:-dify_km} - S3_ENDPOINT: ${S3_ENDPOINT:-} - S3_REGION: ${S3_REGION:-us-east-1} - S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai} - S3_ACCESS_KEY: ${S3_ACCESS_KEY:-} - S3_SECRET_KEY: ${S3_SECRET_KEY:-} - S3_ADDRESS_STYLE: ${S3_ADDRESS_STYLE:-auto} - S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false} - ARCHIVE_STORAGE_ENABLED: ${ARCHIVE_STORAGE_ENABLED:-false} - ARCHIVE_STORAGE_ENDPOINT: ${ARCHIVE_STORAGE_ENDPOINT:-} - ARCHIVE_STORAGE_ARCHIVE_BUCKET: ${ARCHIVE_STORAGE_ARCHIVE_BUCKET:-} - ARCHIVE_STORAGE_EXPORT_BUCKET: ${ARCHIVE_STORAGE_EXPORT_BUCKET:-} - ARCHIVE_STORAGE_ACCESS_KEY: ${ARCHIVE_STORAGE_ACCESS_KEY:-} - ARCHIVE_STORAGE_SECRET_KEY: ${ARCHIVE_STORAGE_SECRET_KEY:-} - ARCHIVE_STORAGE_REGION: ${ARCHIVE_STORAGE_REGION:-auto} - AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai} - AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai} - AZURE_BLOB_CONTAINER_NAME: 
${AZURE_BLOB_CONTAINER_NAME:-difyai-container} - AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://.blob.core.windows.net} - GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name} - GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-} - ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name} - ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key} - ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key} - ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com} - ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1} - ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4} - ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path} - TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name} - TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key} - TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id} - TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region} - TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme} - TENCENT_COS_CUSTOM_DOMAIN: ${TENCENT_COS_CUSTOM_DOMAIN:-your-custom-domain} - OCI_ENDPOINT: ${OCI_ENDPOINT:-https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com} - OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name} - OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key} - OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key} - OCI_REGION: ${OCI_REGION:-us-ashburn-1} - HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name} - HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key} - HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key} - HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url} - HUAWEI_OBS_PATH_STYLE: ${HUAWEI_OBS_PATH_STYLE:-false} - VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name} - VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key} - VOLCENGINE_TOS_ACCESS_KEY: 
${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key} - VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url} - VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region} - BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name} - BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key} - BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key} - BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url} - SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name} - SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key} - SUPABASE_URL: ${SUPABASE_URL:-your-server-url} - VECTOR_STORE: ${VECTOR_STORE:-weaviate} - VECTOR_INDEX_NAME_PREFIX: ${VECTOR_INDEX_NAME_PREFIX:-Vector_index} - WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080} - WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} - WEAVIATE_GRPC_ENDPOINT: ${WEAVIATE_GRPC_ENDPOINT:-grpc://weaviate:50051} - WEAVIATE_TOKENIZATION: ${WEAVIATE_TOKENIZATION:-word} - OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase} - OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} - OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} - OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} - OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} - OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} - OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} - OCEANBASE_ENABLE_HYBRID_SEARCH: ${OCEANBASE_ENABLE_HYBRID_SEARCH:-false} - OCEANBASE_FULLTEXT_PARSER: ${OCEANBASE_FULLTEXT_PARSER:-ik} - SEEKDB_MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G} - QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333} - QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} - QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20} - QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false} - QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334} - QDRANT_REPLICATION_FACTOR: ${QDRANT_REPLICATION_FACTOR:-1} - MILVUS_URI: ${MILVUS_URI:-http://host.docker.internal:19530} - 
MILVUS_DATABASE: ${MILVUS_DATABASE:-} - MILVUS_TOKEN: ${MILVUS_TOKEN:-} - MILVUS_USER: ${MILVUS_USER:-} - MILVUS_PASSWORD: ${MILVUS_PASSWORD:-} - MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False} - MILVUS_ANALYZER_PARAMS: ${MILVUS_ANALYZER_PARAMS:-} - MYSCALE_HOST: ${MYSCALE_HOST:-myscale} - MYSCALE_PORT: ${MYSCALE_PORT:-8123} - MYSCALE_USER: ${MYSCALE_USER:-default} - MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-} - MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify} - MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-} - COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server} - COUCHBASE_USER: ${COUCHBASE_USER:-Administrator} - COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password} - COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings} - COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default} - HOLOGRES_HOST: ${HOLOGRES_HOST:-} - HOLOGRES_PORT: ${HOLOGRES_PORT:-80} - HOLOGRES_DATABASE: ${HOLOGRES_DATABASE:-} - HOLOGRES_ACCESS_KEY_ID: ${HOLOGRES_ACCESS_KEY_ID:-} - HOLOGRES_ACCESS_KEY_SECRET: ${HOLOGRES_ACCESS_KEY_SECRET:-} - HOLOGRES_SCHEMA: ${HOLOGRES_SCHEMA:-public} - HOLOGRES_TOKENIZER: ${HOLOGRES_TOKENIZER:-jieba} - HOLOGRES_DISTANCE_METHOD: ${HOLOGRES_DISTANCE_METHOD:-Cosine} - HOLOGRES_BASE_QUANTIZATION_TYPE: ${HOLOGRES_BASE_QUANTIZATION_TYPE:-rabitq} - HOLOGRES_MAX_DEGREE: ${HOLOGRES_MAX_DEGREE:-64} - HOLOGRES_EF_CONSTRUCTION: ${HOLOGRES_EF_CONSTRUCTION:-400} - PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector} - PGVECTOR_PORT: ${PGVECTOR_PORT:-5432} - PGVECTOR_USER: ${PGVECTOR_USER:-postgres} - PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456} - PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify} - PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1} - PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5} - PGVECTOR_PG_BIGM: ${PGVECTOR_PG_BIGM:-false} - PGVECTOR_PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606} - VASTBASE_HOST: ${VASTBASE_HOST:-vastbase} - VASTBASE_PORT: ${VASTBASE_PORT:-5432} - VASTBASE_USER: 
${VASTBASE_USER:-dify} - VASTBASE_PASSWORD: ${VASTBASE_PASSWORD:-Difyai123456} - VASTBASE_DATABASE: ${VASTBASE_DATABASE:-dify} - VASTBASE_MIN_CONNECTION: ${VASTBASE_MIN_CONNECTION:-1} - VASTBASE_MAX_CONNECTION: ${VASTBASE_MAX_CONNECTION:-5} - PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs} - PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432} - PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres} - PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456} - PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify} - ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak} - ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk} - ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou} - ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456} - ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount} - ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword} - ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify} - ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword} - ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com} - ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432} - ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1} - ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5} - TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb} - TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000} - TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-} - TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-} - TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify} - MATRIXONE_HOST: ${MATRIXONE_HOST:-matrixone} - MATRIXONE_PORT: ${MATRIXONE_PORT:-6001} - MATRIXONE_USER: ${MATRIXONE_USER:-dump} - MATRIXONE_PASSWORD: ${MATRIXONE_PASSWORD:-111} - MATRIXONE_DATABASE: ${MATRIXONE_DATABASE:-dify} - TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1} - TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify} - TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20} - TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false} - TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334} - 
TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify} - TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify} - TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1} - TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1} - TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1} - TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify} - TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100} - CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1} - CHROMA_PORT: ${CHROMA_PORT:-8000} - CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant} - CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database} - CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider} - CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-} - ORACLE_USER: ${ORACLE_USER:-dify} - ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify} - ORACLE_DSN: ${ORACLE_DSN:-oracle:1521/FREEPDB1} - ORACLE_CONFIG_DIR: ${ORACLE_CONFIG_DIR:-/app/api/storage/wallet} - ORACLE_WALLET_LOCATION: ${ORACLE_WALLET_LOCATION:-/app/api/storage/wallet} - ORACLE_WALLET_PASSWORD: ${ORACLE_WALLET_PASSWORD:-dify} - ORACLE_IS_AUTONOMOUS: ${ORACLE_IS_AUTONOMOUS:-false} - ALIBABACLOUD_MYSQL_HOST: ${ALIBABACLOUD_MYSQL_HOST:-127.0.0.1} - ALIBABACLOUD_MYSQL_PORT: ${ALIBABACLOUD_MYSQL_PORT:-3306} - ALIBABACLOUD_MYSQL_USER: ${ALIBABACLOUD_MYSQL_USER:-root} - ALIBABACLOUD_MYSQL_PASSWORD: ${ALIBABACLOUD_MYSQL_PASSWORD:-difyai123456} - ALIBABACLOUD_MYSQL_DATABASE: ${ALIBABACLOUD_MYSQL_DATABASE:-dify} - ALIBABACLOUD_MYSQL_MAX_CONNECTION: ${ALIBABACLOUD_MYSQL_MAX_CONNECTION:-5} - ALIBABACLOUD_MYSQL_HNSW_M: ${ALIBABACLOUD_MYSQL_HNSW_M:-6} - RELYT_HOST: ${RELYT_HOST:-db} - RELYT_PORT: ${RELYT_PORT:-5432} - RELYT_USER: ${RELYT_USER:-postgres} - RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456} - RELYT_DATABASE: ${RELYT_DATABASE:-postgres} - OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch} - OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200} - OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true} - OPENSEARCH_VERIFY_CERTS: ${OPENSEARCH_VERIFY_CERTS:-true} - OPENSEARCH_AUTH_METHOD: ${OPENSEARCH_AUTH_METHOD:-basic} - 
OPENSEARCH_USER: ${OPENSEARCH_USER:-admin} - OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin} - OPENSEARCH_AWS_REGION: ${OPENSEARCH_AWS_REGION:-ap-southeast-1} - OPENSEARCH_AWS_SERVICE: ${OPENSEARCH_AWS_SERVICE:-aoss} - TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1} - TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify} - TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30} - TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify} - TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify} - TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1} - TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2} - TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH: ${TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH:-false} - ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} - ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} - ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} - ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} - KIBANA_PORT: ${KIBANA_PORT:-5601} - ELASTICSEARCH_USE_CLOUD: ${ELASTICSEARCH_USE_CLOUD:-false} - ELASTICSEARCH_CLOUD_URL: ${ELASTICSEARCH_CLOUD_URL:-YOUR-ELASTICSEARCH_CLOUD_URL} - ELASTICSEARCH_API_KEY: ${ELASTICSEARCH_API_KEY:-YOUR-ELASTICSEARCH_API_KEY} - ELASTICSEARCH_VERIFY_CERTS: ${ELASTICSEARCH_VERIFY_CERTS:-False} - ELASTICSEARCH_CA_CERTS: ${ELASTICSEARCH_CA_CERTS:-} - ELASTICSEARCH_REQUEST_TIMEOUT: ${ELASTICSEARCH_REQUEST_TIMEOUT:-100000} - ELASTICSEARCH_RETRY_ON_TIMEOUT: ${ELASTICSEARCH_RETRY_ON_TIMEOUT:-True} - ELASTICSEARCH_MAX_RETRIES: ${ELASTICSEARCH_MAX_RETRIES:-10} - BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} - BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} - BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} - BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify} - BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify} - BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1} - 
BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3} - BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER: ${BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER:-DEFAULT_ANALYZER} - BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE: ${BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE:-COARSE_MODE} - BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT: ${BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT:-500} - BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO: ${BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO:-0.05} - BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS: ${BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS:-300} - VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak} - VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk} - VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai} - VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com} - VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http} - VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30} - VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30} - LINDORM_URL: ${LINDORM_URL:-http://localhost:30070} - LINDORM_USERNAME: ${LINDORM_USERNAME:-admin} - LINDORM_PASSWORD: ${LINDORM_PASSWORD:-admin} - LINDORM_USING_UGC: ${LINDORM_USING_UGC:-True} - LINDORM_QUERY_TIMEOUT: ${LINDORM_QUERY_TIMEOUT:-1} - OPENGAUSS_HOST: ${OPENGAUSS_HOST:-opengauss} - OPENGAUSS_PORT: ${OPENGAUSS_PORT:-6600} - OPENGAUSS_USER: ${OPENGAUSS_USER:-postgres} - OPENGAUSS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123} - OPENGAUSS_DATABASE: ${OPENGAUSS_DATABASE:-dify} - OPENGAUSS_MIN_CONNECTION: ${OPENGAUSS_MIN_CONNECTION:-1} - OPENGAUSS_MAX_CONNECTION: ${OPENGAUSS_MAX_CONNECTION:-5} - OPENGAUSS_ENABLE_PQ: ${OPENGAUSS_ENABLE_PQ:-false} - HUAWEI_CLOUD_HOSTS: ${HUAWEI_CLOUD_HOSTS:-https://127.0.0.1:9200} - HUAWEI_CLOUD_USER: ${HUAWEI_CLOUD_USER:-admin} - HUAWEI_CLOUD_PASSWORD: ${HUAWEI_CLOUD_PASSWORD:-admin} - UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io} - UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify} - TABLESTORE_ENDPOINT: 
${TABLESTORE_ENDPOINT:-https://instance-name.cn-hangzhou.ots.aliyuncs.com} - TABLESTORE_INSTANCE_NAME: ${TABLESTORE_INSTANCE_NAME:-instance-name} - TABLESTORE_ACCESS_KEY_ID: ${TABLESTORE_ACCESS_KEY_ID:-xxx} - TABLESTORE_ACCESS_KEY_SECRET: ${TABLESTORE_ACCESS_KEY_SECRET:-xxx} - TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE: ${TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE:-false} - CLICKZETTA_USERNAME: ${CLICKZETTA_USERNAME:-} - CLICKZETTA_PASSWORD: ${CLICKZETTA_PASSWORD:-} - CLICKZETTA_INSTANCE: ${CLICKZETTA_INSTANCE:-} - CLICKZETTA_SERVICE: ${CLICKZETTA_SERVICE:-api.clickzetta.com} - CLICKZETTA_WORKSPACE: ${CLICKZETTA_WORKSPACE:-quick_start} - CLICKZETTA_VCLUSTER: ${CLICKZETTA_VCLUSTER:-default_ap} - CLICKZETTA_SCHEMA: ${CLICKZETTA_SCHEMA:-dify} - CLICKZETTA_BATCH_SIZE: ${CLICKZETTA_BATCH_SIZE:-100} - CLICKZETTA_ENABLE_INVERTED_INDEX: ${CLICKZETTA_ENABLE_INVERTED_INDEX:-true} - CLICKZETTA_ANALYZER_TYPE: ${CLICKZETTA_ANALYZER_TYPE:-chinese} - CLICKZETTA_ANALYZER_MODE: ${CLICKZETTA_ANALYZER_MODE:-smart} - CLICKZETTA_VECTOR_DISTANCE_FUNCTION: ${CLICKZETTA_VECTOR_DISTANCE_FUNCTION:-cosine_distance} - IRIS_HOST: ${IRIS_HOST:-iris} - IRIS_SUPER_SERVER_PORT: ${IRIS_SUPER_SERVER_PORT:-1972} - IRIS_WEB_SERVER_PORT: ${IRIS_WEB_SERVER_PORT:-52773} - IRIS_USER: ${IRIS_USER:-_SYSTEM} - IRIS_PASSWORD: ${IRIS_PASSWORD:-Dify@1234} - IRIS_DATABASE: ${IRIS_DATABASE:-USER} - IRIS_SCHEMA: ${IRIS_SCHEMA:-dify} - IRIS_CONNECTION_URL: ${IRIS_CONNECTION_URL:-} - IRIS_MIN_CONNECTION: ${IRIS_MIN_CONNECTION:-1} - IRIS_MAX_CONNECTION: ${IRIS_MAX_CONNECTION:-3} - IRIS_TEXT_INDEX: ${IRIS_TEXT_INDEX:-true} - IRIS_TEXT_INDEX_LANGUAGE: ${IRIS_TEXT_INDEX_LANGUAGE:-en} - IRIS_TIMEZONE: ${IRIS_TIMEZONE:-UTC} - UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} - UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} - UPLOAD_FILE_EXTENSION_BLACKLIST: ${UPLOAD_FILE_EXTENSION_BLACKLIST:-} - SINGLE_CHUNK_ATTACHMENT_LIMIT: ${SINGLE_CHUNK_ATTACHMENT_LIMIT:-10} - IMAGE_FILE_BATCH_LIMIT: 
${IMAGE_FILE_BATCH_LIMIT:-10} - ATTACHMENT_IMAGE_FILE_SIZE_LIMIT: ${ATTACHMENT_IMAGE_FILE_SIZE_LIMIT:-2} - ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT: ${ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT:-60} - ETL_TYPE: ${ETL_TYPE:-dify} - UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-} - UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-} - SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true} - PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512} - CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024} - PLUGIN_BASED_TOKEN_COUNTING_ENABLED: ${PLUGIN_BASED_TOKEN_COUNTING_ENABLED:-false} - MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64} - UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10} - UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100} - UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50} - SENTRY_DSN: ${SENTRY_DSN:-} - API_SENTRY_DSN: ${API_SENTRY_DSN:-} - API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} - API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} - WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-} - PLUGIN_SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false} - PLUGIN_SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-} - NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public} - NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-} - NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-} - NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-} - MAIL_TYPE: ${MAIL_TYPE:-resend} - MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-} - RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com} - RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key} - SMTP_SERVER: ${SMTP_SERVER:-} - SMTP_PORT: ${SMTP_PORT:-465} - SMTP_USERNAME: ${SMTP_USERNAME:-} - SMTP_PASSWORD: ${SMTP_PASSWORD:-} - SMTP_USE_TLS: ${SMTP_USE_TLS:-true} - SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false} - SMTP_LOCAL_HOSTNAME: ${SMTP_LOCAL_HOSTNAME:-} - SENDGRID_API_KEY: ${SENDGRID_API_KEY:-} - INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: 
${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} - INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72} - RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5} - EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES: ${EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES:-5} - CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES: ${CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES:-5} - OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES: ${OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES:-5} - CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194} - CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox} - CODE_EXECUTION_SSL_VERIFY: ${CODE_EXECUTION_SSL_VERIFY:-True} - CODE_EXECUTION_POOL_MAX_CONNECTIONS: ${CODE_EXECUTION_POOL_MAX_CONNECTIONS:-100} - CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS: ${CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS:-20} - CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY: ${CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY:-5.0} - CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807} - CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808} - CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5} - CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20} - CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-400000} - CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30} - CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30} - CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000} - CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10} - CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60} - CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10} - TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-400000} - WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500} - WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200} - WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} - MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800} - WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10} - GRAPH_ENGINE_MIN_WORKERS: 
${GRAPH_ENGINE_MIN_WORKERS:-1} - GRAPH_ENGINE_MAX_WORKERS: ${GRAPH_ENGINE_MAX_WORKERS:-10} - GRAPH_ENGINE_SCALE_UP_THRESHOLD: ${GRAPH_ENGINE_SCALE_UP_THRESHOLD:-3} - GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME: ${GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME:-5.0} - WORKFLOW_NODE_EXECUTION_STORAGE: ${WORKFLOW_NODE_EXECUTION_STORAGE:-rdbms} - CORE_WORKFLOW_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository} - CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository} - API_WORKFLOW_RUN_REPOSITORY: ${API_WORKFLOW_RUN_REPOSITORY:-repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository} - API_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${API_WORKFLOW_NODE_EXECUTION_REPOSITORY:-repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository} - WORKFLOW_LOG_CLEANUP_ENABLED: ${WORKFLOW_LOG_CLEANUP_ENABLED:-false} - WORKFLOW_LOG_RETENTION_DAYS: ${WORKFLOW_LOG_RETENTION_DAYS:-30} - WORKFLOW_LOG_CLEANUP_BATCH_SIZE: ${WORKFLOW_LOG_CLEANUP_BATCH_SIZE:-100} - WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS: ${WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS:-} - ALIYUN_SLS_ACCESS_KEY_ID: ${ALIYUN_SLS_ACCESS_KEY_ID:-} - ALIYUN_SLS_ACCESS_KEY_SECRET: ${ALIYUN_SLS_ACCESS_KEY_SECRET:-} - ALIYUN_SLS_ENDPOINT: ${ALIYUN_SLS_ENDPOINT:-} - ALIYUN_SLS_REGION: ${ALIYUN_SLS_REGION:-} - ALIYUN_SLS_PROJECT_NAME: ${ALIYUN_SLS_PROJECT_NAME:-} - ALIYUN_SLS_LOGSTORE_TTL: ${ALIYUN_SLS_LOGSTORE_TTL:-365} - LOGSTORE_DUAL_WRITE_ENABLED: ${LOGSTORE_DUAL_WRITE_ENABLED:-false} - LOGSTORE_DUAL_READ_ENABLED: ${LOGSTORE_DUAL_READ_ENABLED:-true} - LOGSTORE_ENABLE_PUT_GRAPH_FIELD: ${LOGSTORE_ENABLE_PUT_GRAPH_FIELD:-true} - HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} - 
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} - HTTP_REQUEST_NODE_SSL_VERIFY: ${HTTP_REQUEST_NODE_SSL_VERIFY:-True} - HTTP_REQUEST_MAX_CONNECT_TIMEOUT: ${HTTP_REQUEST_MAX_CONNECT_TIMEOUT:-10} - HTTP_REQUEST_MAX_READ_TIMEOUT: ${HTTP_REQUEST_MAX_READ_TIMEOUT:-600} - HTTP_REQUEST_MAX_WRITE_TIMEOUT: ${HTTP_REQUEST_MAX_WRITE_TIMEOUT:-600} - WEBHOOK_REQUEST_BODY_MAX_SIZE: ${WEBHOOK_REQUEST_BODY_MAX_SIZE:-10485760} - RESPECT_XFORWARD_HEADERS_ENABLED: ${RESPECT_XFORWARD_HEADERS_ENABLED:-false} - SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} - SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} - LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} - MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} - MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} - MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99} - TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} - EXPERIMENTAL_ENABLE_VINEXT: ${EXPERIMENTAL_ENABLE_VINEXT:-false} - ALLOW_INLINE_STYLES: ${ALLOW_INLINE_STYLES:-false} - ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false} - MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50} - PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} - MYSQL_HOST_VOLUME: ${MYSQL_HOST_VOLUME:-./volumes/mysql/data} - SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} - SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release} - SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} - SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} - SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} - SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} - SANDBOX_PORT: ${SANDBOX_PORT:-8194} - WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} - WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} - WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true} - WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} - 
WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} - WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} - WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} - WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} - WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} - WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} - WEAVIATE_DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false} - WEAVIATE_ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false} - WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false} - WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false} - CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} - CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} - CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} - ORACLE_PWD: ${ORACLE_PWD:-Dify123456} - ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} - ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} - ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} - ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} - ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} - MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} - MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} - ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} - MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} - MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true} - PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres} - PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} - PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} - PGVECTOR_PGDATA: 
${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} - OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} - OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} - OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m} - OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m} - OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} - OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1} - OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1} - OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536} - OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536} - NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} - NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} - NGINX_PORT: ${NGINX_PORT:-80} - NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} - NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} - NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} - NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.2 TLSv1.3} - NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} - NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M} - NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} - NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} - NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} - NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} - CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com} - CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com} - CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-} - SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128} - SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} - SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} - SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} - SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5} - SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5} - SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5} - SSRF_DEFAULT_WRITE_TIME_OUT: 
${SSRF_DEFAULT_WRITE_TIME_OUT:-5} - SSRF_POOL_MAX_CONNECTIONS: ${SSRF_POOL_MAX_CONNECTIONS:-100} - SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS: ${SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS:-20} - SSRF_POOL_KEEPALIVE_EXPIRY: ${SSRF_POOL_KEEPALIVE_EXPIRY:-5.0} - EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80} - EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443} - POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-} - POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-} - POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-} - POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-} - POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-} - POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-} - CSP_WHITELIST: ${CSP_WHITELIST:-} - CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} - MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} - TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} - DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} - EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002} - PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002} - PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} - PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} - PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} - PLUGIN_MODEL_SCHEMA_CACHE_TTL: ${PLUGIN_MODEL_SCHEMA_CACHE_TTL:-3600} - PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} - PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} - PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} - EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} - EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} - PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} - PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} - ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} - MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} - 
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} - CREATORS_PLATFORM_FEATURES_ENABLED: ${CREATORS_PLATFORM_FEATURES_ENABLED:-true} - CREATORS_PLATFORM_API_URL: ${CREATORS_PLATFORM_API_URL:-https://creators.dify.ai} - CREATORS_PLATFORM_OAUTH_CLIENT_ID: ${CREATORS_PLATFORM_OAUTH_CLIENT_ID:-} - FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} - ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES: ${ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES:-true} - PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024} - PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880} - PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120} - PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600} - PLUGIN_DAEMON_TIMEOUT: ${PLUGIN_DAEMON_TIMEOUT:-600.0} - PIP_MIRROR_URL: ${PIP_MIRROR_URL:-} - PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local} - PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage} - PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd} - PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin} - PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages} - PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets} - PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-} - PLUGIN_S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false} - PLUGIN_S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false} - PLUGIN_S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-} - PLUGIN_S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false} - PLUGIN_AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-} - PLUGIN_AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-} - PLUGIN_AWS_REGION: ${PLUGIN_AWS_REGION:-} - PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-} - PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-} - PLUGIN_TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-} - PLUGIN_TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-} - 
PLUGIN_TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-} - PLUGIN_ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-} - PLUGIN_ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-} - PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-} - PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-} - PLUGIN_ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4} - PLUGIN_ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-} - PLUGIN_VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-} - PLUGIN_VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-} - PLUGIN_VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-} - PLUGIN_VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-} - ENABLE_OTEL: ${ENABLE_OTEL:-false} - OTLP_TRACE_ENDPOINT: ${OTLP_TRACE_ENDPOINT:-} - OTLP_METRIC_ENDPOINT: ${OTLP_METRIC_ENDPOINT:-} - OTLP_BASE_ENDPOINT: ${OTLP_BASE_ENDPOINT:-http://localhost:4318} - OTLP_API_KEY: ${OTLP_API_KEY:-} - OTEL_EXPORTER_OTLP_PROTOCOL: ${OTEL_EXPORTER_OTLP_PROTOCOL:-} - OTEL_EXPORTER_TYPE: ${OTEL_EXPORTER_TYPE:-otlp} - OTEL_SAMPLING_RATE: ${OTEL_SAMPLING_RATE:-0.1} - OTEL_BATCH_EXPORT_SCHEDULE_DELAY: ${OTEL_BATCH_EXPORT_SCHEDULE_DELAY:-5000} - OTEL_MAX_QUEUE_SIZE: ${OTEL_MAX_QUEUE_SIZE:-2048} - OTEL_MAX_EXPORT_BATCH_SIZE: ${OTEL_MAX_EXPORT_BATCH_SIZE:-512} - OTEL_METRIC_EXPORT_INTERVAL: ${OTEL_METRIC_EXPORT_INTERVAL:-60000} - OTEL_BATCH_EXPORT_TIMEOUT: ${OTEL_BATCH_EXPORT_TIMEOUT:-10000} - OTEL_METRIC_EXPORT_TIMEOUT: ${OTEL_METRIC_EXPORT_TIMEOUT:-30000} - ALLOW_EMBED: ${ALLOW_EMBED:-false} - QUEUE_MONITOR_THRESHOLD: ${QUEUE_MONITOR_THRESHOLD:-200} - QUEUE_MONITOR_ALERT_EMAILS: ${QUEUE_MONITOR_ALERT_EMAILS:-} - QUEUE_MONITOR_INTERVAL: ${QUEUE_MONITOR_INTERVAL:-30} - SWAGGER_UI_ENABLED: ${SWAGGER_UI_ENABLED:-false} - SWAGGER_UI_PATH: ${SWAGGER_UI_PATH:-/swagger-ui.html} - DSL_EXPORT_ENCRYPT_DATASET_ID: ${DSL_EXPORT_ENCRYPT_DATASET_ID:-true} - DATASET_MAX_SEGMENTS_PER_REQUEST: 
${DATASET_MAX_SEGMENTS_PER_REQUEST:-0} - ENABLE_CLEAN_EMBEDDING_CACHE_TASK: ${ENABLE_CLEAN_EMBEDDING_CACHE_TASK:-false} - ENABLE_CLEAN_UNUSED_DATASETS_TASK: ${ENABLE_CLEAN_UNUSED_DATASETS_TASK:-false} - ENABLE_CREATE_TIDB_SERVERLESS_TASK: ${ENABLE_CREATE_TIDB_SERVERLESS_TASK:-false} - ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK: ${ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK:-false} - ENABLE_CLEAN_MESSAGES: ${ENABLE_CLEAN_MESSAGES:-false} - ENABLE_WORKFLOW_RUN_CLEANUP_TASK: ${ENABLE_WORKFLOW_RUN_CLEANUP_TASK:-false} - ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK: ${ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK:-false} - ENABLE_DATASETS_QUEUE_MONITOR: ${ENABLE_DATASETS_QUEUE_MONITOR:-false} - ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK: ${ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK:-true} - ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK: ${ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK:-true} - WORKFLOW_SCHEDULE_POLLER_INTERVAL: ${WORKFLOW_SCHEDULE_POLLER_INTERVAL:-1} - WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE: ${WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE:-100} - WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK: ${WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK:-0} - TENANT_ISOLATED_TASK_CONCURRENCY: ${TENANT_ISOLATED_TASK_CONCURRENCY:-1} - ANNOTATION_IMPORT_FILE_SIZE_LIMIT: ${ANNOTATION_IMPORT_FILE_SIZE_LIMIT:-2} - ANNOTATION_IMPORT_MAX_RECORDS: ${ANNOTATION_IMPORT_MAX_RECORDS:-10000} - ANNOTATION_IMPORT_MIN_RECORDS: ${ANNOTATION_IMPORT_MIN_RECORDS:-1} - ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE:-5} - ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR:-20} - ANNOTATION_IMPORT_MAX_CONCURRENT: ${ANNOTATION_IMPORT_MAX_CONCURRENT:-5} - AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-} - SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD: ${SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD:-21} - SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE:-1000} - SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL:-200} - 
SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS: ${SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS:-30} - EVENT_BUS_REDIS_URL: ${EVENT_BUS_REDIS_URL:-} - EVENT_BUS_REDIS_CHANNEL_TYPE: ${EVENT_BUS_REDIS_CHANNEL_TYPE:-pubsub} - EVENT_BUS_REDIS_USE_CLUSTERS: ${EVENT_BUS_REDIS_USE_CLUSTERS:-false} - ENABLE_HUMAN_INPUT_TIMEOUT_TASK: ${ENABLE_HUMAN_INPUT_TIMEOUT_TASK:-true} - HUMAN_INPUT_TIMEOUT_TASK_INTERVAL: ${HUMAN_INPUT_TIMEOUT_TASK_INTERVAL:-1} - SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL: ${SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL:-90000} +# Shared configuration using YAML anchors and env_file +x-shared-api-worker-config: &shared-api-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/api.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: 
./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-config: &shared-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: 
false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-beat-config: &shared-worker-beat-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker-beat.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: 
./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always services: # Init container to fix permissions @@ -745,12 +225,9 @@ services: # API service api: + <<: *shared-api-worker-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'api' starts the API server. MODE: api SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -793,12 +270,9 @@ services: # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: + <<: *shared-worker-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker' starts the Celery worker for processing all queues. MODE: worker SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -839,12 +313,9 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: + <<: *shared-worker-beat-config image: langgenius/dify-api:1.14.0 - restart: always environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks. 
MODE: beat depends_on: init_permissions: @@ -878,6 +349,12 @@ services: web: image: langgenius/dify-web:1.14.0 restart: always + env_file: + - path: ./envs/core-services/web.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} APP_API_URL: ${APP_API_URL:-} @@ -894,8 +371,8 @@ services: ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false} MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} - TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} - INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} @@ -952,7 +429,7 @@ services: MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456} MYSQL_DATABASE: ${DB_DATABASE:-dify} command: > - --max_connections=1000 + --max_connections=${MYSQL_MAX_CONNECTIONS:-1000} --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M} --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} @@ -994,6 +471,12 @@ services: sandbox: image: langgenius/dify-sandbox:0.2.15 restart: always + env_file: + - path: ./envs/core-services/sandbox.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: # The DifySandbox configurations # Make sure you are changing this key for your deployment with a strong key. 
@@ -1018,9 +501,24 @@ services: plugin_daemon: image: langgenius/dify-plugin-daemon:0.6.0-local restart: always + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/plugin-daemon.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default environment: - # Use the shared environment variables. - <<: *shared-api-worker-env DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} DB_SSL_MODE: ${DB_SSL_MODE:-disable} SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} @@ -1126,8 +624,8 @@ services: - ./certbot/update-cert.template.txt:/update-cert.template.txt - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh environment: - - CERTBOT_EMAIL=${CERTBOT_EMAIL} - - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} + - CERTBOT_EMAIL=${CERTBOT_EMAIL:-} + - CERTBOT_DOMAIN=${CERTBOT_DOMAIN:-} - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} entrypoint: ["/docker-entrypoint.sh"] command: ["tail", "-f", "/dev/null"] diff --git a/docker/envs/core-services/api.env.example b/docker/envs/core-services/api.env.example new file mode 100644 index 0000000000..1a3fc7a4ab --- /dev/null +++ b/docker/envs/core-services/api.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Api Configuration +# ------------------------------ + +MODE=api +SENTRY_DSN= +SENTRY_TRACES_SAMPLE_RATE=1.0 +SENTRY_PROFILES_SAMPLE_RATE=1.0 +PLUGIN_REMOTE_INSTALL_HOST=localhost +PLUGIN_REMOTE_INSTALL_PORT=5003 +PLUGIN_MAX_PACKAGE_SIZE=52428800 +PLUGIN_DAEMON_TIMEOUT=600.0 +INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 diff --git a/docker/envs/core-services/plugin-daemon.env.example b/docker/envs/core-services/plugin-daemon.env.example new file mode 100644 index 0000000000..c3b1bef974 --- /dev/null +++ 
b/docker/envs/core-services/plugin-daemon.env.example @@ -0,0 +1,23 @@ +# ------------------------------ +# Plugin Daemon Configuration +# ------------------------------ + +DB_PLUGIN_DATABASE=dify_plugin +PLUGIN_DAEMON_URL=http://plugin_daemon:5002 +PLUGIN_PPROF_ENABLED=false +PLUGIN_DIFY_INNER_API_URL=http://api:5001 +FORCE_VERIFYING_SIGNATURE=true +PLUGIN_STDIO_BUFFER_SIZE=1024 +PLUGIN_STDIO_MAX_BUFFER_SIZE=5242880 +PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120 +PLUGIN_MAX_EXECUTION_TIMEOUT=600 +PLUGIN_DEBUGGING_HOST=0.0.0.0 +PLUGIN_DEBUGGING_PORT=5003 +PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi +PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 +PLUGIN_DAEMON_PORT=5002 +CELERY_WORKER_CLASS= +PLUGIN_STORAGE_TYPE=local +PLUGIN_STORAGE_LOCAL_ROOT=/app/storage +PLUGIN_WORKING_PATH=/app/storage/cwd +PLUGIN_STORAGE_OSS_BUCKET= diff --git a/docker/envs/core-services/sandbox.env.example b/docker/envs/core-services/sandbox.env.example new file mode 100644 index 0000000000..5d4ee6614b --- /dev/null +++ b/docker/envs/core-services/sandbox.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Sandbox Configuration +# ------------------------------ + +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +SANDBOX_PORT=8194 +PIP_MIRROR_URL= +SANDBOX_API_KEY=dify-sandbox +SANDBOX_GIN_MODE=release +SANDBOX_WORKER_TIMEOUT=15 +SANDBOX_ENABLE_NETWORK=true +SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21 +SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000 +SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL=200 +SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30 +SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL=90000 diff --git a/docker/envs/core-services/shared.env.example b/docker/envs/core-services/shared.env.example new file mode 100644 index 0000000000..2a57f6954a --- /dev/null +++ b/docker/envs/core-services/shared.env.example @@ -0,0 +1,469 @@ +# ------------------------------ +# Shared API/Worker 
Configuration +# ------------------------------ + +CONSOLE_WEB_URL= +SERVICE_API_URL= +TRIGGER_URL=http://localhost +APP_WEB_URL= +FILES_URL= +INTERNAL_FILES_URL= +LANG=C.UTF-8 +LC_ALL=C.UTF-8 +PYTHONIOENCODING=utf-8 +UV_CACHE_DIR=/tmp/.uv-cache +CHECK_UPDATE_URL=https://updates.dify.ai +OPENAI_API_BASE=https://api.openai.com/v1 +MIGRATION_ENABLED=true +FILES_ACCESS_TIMEOUT=300 +ENABLE_COLLABORATION_MODE=false +CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1 +CELERY_TASK_ANNOTATIONS=null +AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net +SUPABASE_URL=your-server-url +TIDB_ON_QDRANT_URL=http://127.0.0.1 +TIDB_ON_QDRANT_API_KEY=dify +TIDB_API_URL=http://127.0.0.1 +TIDB_IAM_API_URL=http://127.0.0.1 +TIDB_REGION=regions/aws-us-east-1 +TIDB_PROJECT_ID=dify +TIDB_SPEND_LIMIT=100 +TENCENT_VECTOR_DB_URL=http://127.0.0.1 +TENCENT_VECTOR_DB_API_KEY=dify +LINDORM_URL=http://localhost:30070 +LINDORM_USERNAME=admin +UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io +UPLOAD_FILE_SIZE_LIMIT=15 +UPLOAD_FILE_BATCH_LIMIT=5 +UPLOAD_FILE_EXTENSION_BLACKLIST= +SINGLE_CHUNK_ATTACHMENT_LIMIT=10 +IMAGE_FILE_BATCH_LIMIT=10 +ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2 +ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60 +ETL_TYPE=dify +UNSTRUCTURED_API_URL= +MULTIMODAL_SEND_FORMAT=base64 +UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 +UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 +UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 +API_SENTRY_DSN= +API_SENTRY_TRACES_SAMPLE_RATE=1.0 +API_SENTRY_PROFILES_SAMPLE_RATE=1.0 +WEB_SENTRY_DSN= +PLUGIN_SENTRY_ENABLED=false +PLUGIN_SENTRY_DSN= +NOTION_INTEGRATION_TYPE=public +RESEND_API_URL=https://api.resend.com +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 +PGDATA=/var/lib/postgresql/data/pgdata +PLUGIN_MAX_PACKAGE_SIZE=52428800 +PLUGIN_MODEL_SCHEMA_CACHE_TTL=3600 +ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} +LOG_LEVEL=INFO +LOG_OUTPUT_FORMAT=text +LOG_FILE=/app/logs/server.log +LOG_FILE_MAX_SIZE=20 +LOG_FILE_BACKUP_COUNT=5 +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S 
+LOG_TZ=UTC +DEBUG=false +FLASK_DEBUG=false +ENABLE_REQUEST_LOGGING=False +WORKFLOW_LOG_CLEANUP_ENABLED=false +WORKFLOW_LOG_RETENTION_DAYS=30 +WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100 +WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS= +EXPOSE_PLUGIN_DEBUGGING_HOST=localhost +EXPOSE_PLUGIN_DEBUGGING_PORT=5003 +DEPLOY_ENV=PRODUCTION +ACCESS_TOKEN_EXPIRE_MINUTES=60 +REFRESH_TOKEN_EXPIRE_DAYS=30 +APP_DEFAULT_ACTIVE_REQUESTS=0 +APP_MAX_ACTIVE_REQUESTS=0 +APP_MAX_EXECUTION_TIME=1200 +DIFY_BIND_ADDRESS=0.0.0.0 +DIFY_PORT=5001 +SERVER_WORKER_AMOUNT=1 +SERVER_WORKER_CLASS=gevent +SERVER_WORKER_CONNECTIONS=10 +CELERY_SENTINEL_PASSWORD= +S3_ACCESS_KEY= +S3_SECRET_KEY= +ARCHIVE_STORAGE_ACCESS_KEY= +ARCHIVE_STORAGE_SECRET_KEY= +AZURE_BLOB_ACCOUNT_KEY=difyai +ALIYUN_OSS_ACCESS_KEY=your-access-key +ALIYUN_OSS_SECRET_KEY=your-secret-key +TENCENT_COS_SECRET_KEY=your-secret-key +TENCENT_COS_SECRET_ID=your-secret-id +OCI_ACCESS_KEY=your-access-key +OCI_SECRET_KEY=your-secret-key +HUAWEI_OBS_SECRET_KEY=your-secret-key +HUAWEI_OBS_ACCESS_KEY=your-access-key +VOLCENGINE_TOS_SECRET_KEY=your-secret-key +VOLCENGINE_TOS_ACCESS_KEY=your-access-key +BAIDU_OBS_SECRET_KEY=your-secret-key +BAIDU_OBS_ACCESS_KEY=your-access-key +SUPABASE_API_KEY=your-access-key +ALIBABACLOUD_MYSQL_PASSWORD=difyai123456 +RELYT_PASSWORD=difyai123456 +LINDORM_PASSWORD=admin +LINDORM_USING_UGC=True +LINDORM_QUERY_TIMEOUT=1 +HUAWEI_CLOUD_PASSWORD=admin +UPSTASH_VECTOR_TOKEN=dify +TABLESTORE_ACCESS_KEY_ID=xxx +TABLESTORE_ACCESS_KEY_SECRET=xxx +TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false +CLICKZETTA_PASSWORD= +CLICKZETTA_INSTANCE= +CLICKZETTA_SERVICE=api.clickzetta.com +CLICKZETTA_WORKSPACE=quick_start +CLICKZETTA_VCLUSTER=default_ap +CLICKZETTA_SCHEMA=dify +CLICKZETTA_BATCH_SIZE=100 +CLICKZETTA_ENABLE_INVERTED_INDEX=true +CLICKZETTA_ANALYZER_TYPE=chinese +CLICKZETTA_ANALYZER_MODE=smart +UNSTRUCTURED_API_KEY= +SCARF_NO_ANALYTICS=true +PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false +NOTION_CLIENT_SECRET= +NOTION_CLIENT_ID= 
+NOTION_INTERNAL_SECRET= +MAIL_TYPE=resend +MAIL_DEFAULT_SEND_FROM= +RESEND_API_KEY=your-resend-api-key +SMTP_SERVER= +SMTP_PORT=465 +SMTP_USERNAME= +SMTP_PASSWORD= +SMTP_USE_TLS=true +SMTP_OPPORTUNISTIC_TLS=false +SMTP_LOCAL_HOSTNAME= +SENDGRID_API_KEY= +INVITE_EXPIRY_HOURS=72 +RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 +EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5 +CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5 +OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5 +CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox +CODE_EXECUTION_SSL_VERIFY=True +CODE_EXECUTION_POOL_MAX_CONNECTIONS=100 +CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0 +CODE_MAX_NUMBER=9223372036854775807 +CODE_MIN_NUMBER=-9223372036854775808 +CODE_MAX_DEPTH=5 +CODE_MAX_PRECISION=20 +CODE_MAX_STRING_LENGTH=400000 +CODE_MAX_STRING_ARRAY_LENGTH=30 +CODE_MAX_OBJECT_ARRAY_LENGTH=30 +CODE_MAX_NUMBER_ARRAY_LENGTH=1000 +CODE_EXECUTION_CONNECT_TIMEOUT=10 +CODE_EXECUTION_READ_TIMEOUT=60 +CODE_EXECUTION_WRITE_TIMEOUT=10 +TEMPLATE_TRANSFORM_MAX_LENGTH=400000 +WORKFLOW_MAX_EXECUTION_STEPS=500 +WORKFLOW_MAX_EXECUTION_TIME=1200 +WORKFLOW_CALL_MAX_DEPTH=5 +MAX_VARIABLE_SIZE=204800 +WORKFLOW_FILE_UPLOAD_LIMIT=10 +GRAPH_ENGINE_MIN_WORKERS=1 +GRAPH_ENGINE_MAX_WORKERS=10 +GRAPH_ENGINE_SCALE_UP_THRESHOLD=3 +GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0 +ALIYUN_SLS_ACCESS_KEY_ID= +ALIYUN_SLS_ACCESS_KEY_SECRET= +WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760 +RESPECT_XFORWARD_HEADERS_ENABLED=false +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 +SSRF_POOL_MAX_CONNECTIONS=100 +SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +SSRF_POOL_KEEPALIVE_EXPIRY=5.0 +PLUGIN_AWS_ACCESS_KEY= +PLUGIN_AWS_SECRET_KEY= +PLUGIN_AWS_REGION= +PLUGIN_TENCENT_COS_SECRET_KEY= +PLUGIN_TENCENT_COS_SECRET_ID= +PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID= 
+PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET= +PLUGIN_VOLCENGINE_TOS_ACCESS_KEY= +PLUGIN_VOLCENGINE_TOS_SECRET_KEY= +OTLP_API_KEY= +OTEL_EXPORTER_OTLP_PROTOCOL= +OTEL_EXPORTER_TYPE=otlp +OTEL_SAMPLING_RATE=0.1 +OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000 +OTEL_MAX_QUEUE_SIZE=2048 +OTEL_MAX_EXPORT_BATCH_SIZE=512 +OTEL_METRIC_EXPORT_INTERVAL=60000 +OTEL_BATCH_EXPORT_TIMEOUT=10000 +OTEL_METRIC_EXPORT_TIMEOUT=30000 +QUEUE_MONITOR_THRESHOLD=200 +QUEUE_MONITOR_ALERT_EMAILS= +QUEUE_MONITOR_INTERVAL=30 +SWAGGER_UI_ENABLED=false +SWAGGER_UI_PATH=/swagger-ui.html +DSL_EXPORT_ENCRYPT_DATASET_ID=true +DATASET_MAX_SEGMENTS_PER_REQUEST=0 +ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false +ENABLE_CLEAN_UNUSED_DATASETS_TASK=false +ENABLE_CREATE_TIDB_SERVERLESS_TASK=false +ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false +ENABLE_CLEAN_MESSAGES=false +ENABLE_WORKFLOW_RUN_CLEANUP_TASK=false +ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false +ENABLE_DATASETS_QUEUE_MONITOR=false +ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true +ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK=true +WORKFLOW_SCHEDULE_POLLER_INTERVAL=1 +WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE=100 +WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0 +TENANT_ISOLATED_TASK_CONCURRENCY=1 +ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2 +ANNOTATION_IMPORT_MAX_RECORDS=10000 +ANNOTATION_IMPORT_MIN_RECORDS=1 +ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5 +ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20 +ANNOTATION_IMPORT_MAX_CONCURRENT=5 +CREATORS_PLATFORM_FEATURES_ENABLED=true +CREATORS_PLATFORM_API_URL=https://creators.dify.ai +CREATORS_PLATFORM_OAUTH_CLIENT_ID= +TIDB_VECTOR_DATABASE=dify +ALIBABACLOUD_MYSQL_HOST=127.0.0.1 +ALIBABACLOUD_MYSQL_PORT=3306 +ALIBABACLOUD_MYSQL_USER=root +ALIBABACLOUD_MYSQL_DATABASE=dify +ALIBABACLOUD_MYSQL_MAX_CONNECTION=5 +ALIBABACLOUD_MYSQL_HNSW_M=6 +RELYT_DATABASE=postgres +TENCENT_VECTOR_DB_DATABASE=dify +BAIDU_VECTOR_DB_DATABASE=dify +EXPOSE_PLUGIN_DAEMON_PORT=5002 +GUNICORN_TIMEOUT=360 +CELERY_WORKER_AMOUNT= +CELERY_AUTO_SCALE=false +CELERY_MAX_WORKERS= +CELERY_MIN_WORKERS= 
+API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 +API_TOOL_DEFAULT_READ_TIMEOUT=60 +CELERY_BACKEND=redis +CELERY_USE_SENTINEL=false +CELERY_SENTINEL_MASTER_NAME= +CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 +WEB_API_CORS_ALLOW_ORIGINS=* +CONSOLE_CORS_ALLOW_ORIGINS=* +COOKIE_DOMAIN= +OPENDAL_SCHEME=fs +OPENDAL_FS_ROOT=storage +CLICKZETTA_VOLUME_TYPE=user +CLICKZETTA_VOLUME_NAME= +CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_ +CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km +S3_ENDPOINT= +S3_REGION=us-east-1 +S3_BUCKET_NAME=difyai +S3_ADDRESS_STYLE=auto +S3_USE_AWS_MANAGED_IAM=false +ARCHIVE_STORAGE_ENABLED=false +ARCHIVE_STORAGE_ENDPOINT= +ARCHIVE_STORAGE_ARCHIVE_BUCKET= +ARCHIVE_STORAGE_EXPORT_BUCKET= +ARCHIVE_STORAGE_REGION=auto +AZURE_BLOB_ACCOUNT_NAME=difyai +AZURE_BLOB_CONTAINER_NAME=difyai-container +GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name +GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= +ALIYUN_OSS_BUCKET_NAME=your-bucket-name +ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com +ALIYUN_OSS_REGION=ap-southeast-1 +ALIYUN_OSS_AUTH_VERSION=v4 +ALIYUN_OSS_PATH=your-path +ALIYUN_CLOUDBOX_ID=your-cloudbox-id +TENCENT_COS_BUCKET_NAME=your-bucket-name +TENCENT_COS_REGION=your-region +TENCENT_COS_SCHEME=your-scheme +TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain +OCI_ENDPOINT=https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com +OCI_BUCKET_NAME=your-bucket-name +OCI_REGION=us-ashburn-1 +HUAWEI_OBS_BUCKET_NAME=your-bucket-name +HUAWEI_OBS_SERVER=your-server-url +HUAWEI_OBS_PATH_STYLE=false +VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name +VOLCENGINE_TOS_ENDPOINT=your-server-url +VOLCENGINE_TOS_REGION=your-region +BAIDU_OBS_BUCKET_NAME=your-bucket-name +BAIDU_OBS_ENDPOINT=your-server-url +SUPABASE_BUCKET_NAME=your-bucket-name +TENCENT_VECTOR_DB_TIMEOUT=30 +TENCENT_VECTOR_DB_USERNAME=dify +TENCENT_VECTOR_DB_SHARD=1 +TENCENT_VECTOR_DB_REPLICAS=2 +TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false +BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 
+BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 +BAIDU_VECTOR_DB_ACCOUNT=root +BAIDU_VECTOR_DB_API_KEY=dify +BAIDU_VECTOR_DB_SHARD=1 +BAIDU_VECTOR_DB_REPLICAS=3 +BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER +BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE +BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT=500 +BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO=0.05 +BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS=300 +HUAWEI_CLOUD_HOSTS=https://127.0.0.1:9200 +HUAWEI_CLOUD_USER=admin +WORKFLOW_NODE_EXECUTION_STORAGE=rdbms +CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository +CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository +API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository +API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository +ALIYUN_SLS_ENDPOINT= +ALIYUN_SLS_REGION= +ALIYUN_SLS_PROJECT_NAME= +ALIYUN_SLS_LOGSTORE_TTL=365 +LOGSTORE_DUAL_WRITE_ENABLED=false +LOGSTORE_DUAL_READ_ENABLED=true +LOGSTORE_ENABLE_PUT_GRAPH_FIELD=true +HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 +HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 +HTTP_REQUEST_NODE_SSL_VERIFY=True +HTTP_REQUEST_MAX_CONNECT_TIMEOUT=10 +HTTP_REQUEST_MAX_READ_TIMEOUT=600 +HTTP_REQUEST_MAX_WRITE_TIMEOUT=600 +PLUGIN_INSTALLED_PATH=plugin +PLUGIN_PACKAGE_CACHE_PATH=plugin_packages +PLUGIN_MEDIA_CACHE_PATH=assets +PLUGIN_S3_USE_AWS=false +PLUGIN_S3_USE_AWS_MANAGED_IAM=false +PLUGIN_S3_ENDPOINT= +PLUGIN_S3_USE_PATH_STYLE=false +PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME= +PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING= +PLUGIN_TENCENT_COS_REGION= +PLUGIN_ALIYUN_OSS_REGION= +PLUGIN_ALIYUN_OSS_ENDPOINT= +PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4 +PLUGIN_ALIYUN_OSS_PATH= 
+PLUGIN_VOLCENGINE_TOS_ENDPOINT= +PLUGIN_VOLCENGINE_TOS_REGION= +ENABLE_OTEL=false +OTLP_TRACE_ENDPOINT= +OTLP_METRIC_ENDPOINT= +# Prefix used to create collection name in vector database +OTLP_BASE_ENDPOINT=http://localhost:4318 +WEAVIATE_GRPC_ENDPOINT=grpc://weaviate:50051 +ANALYTICDB_KEY_ID=your-ak +ANALYTICDB_KEY_SECRET=your-sk +ANALYTICDB_REGION_ID=cn-hangzhou +ANALYTICDB_INSTANCE_ID=gp-ab123456 +ANALYTICDB_ACCOUNT=testaccount +ANALYTICDB_PASSWORD=testpassword +ANALYTICDB_NAMESPACE=dify +ANALYTICDB_NAMESPACE_PASSWORD=difypassword +ANALYTICDB_HOST=gp-test.aliyuncs.com +ANALYTICDB_PORT=5432 +ANALYTICDB_MIN_CONNECTION=1 +ANALYTICDB_MAX_CONNECTION=5 +TIDB_VECTOR_HOST=tidb +TIDB_VECTOR_PORT=4000 +TIDB_VECTOR_USER= +TIDB_VECTOR_PASSWORD= +TIDB_ON_QDRANT_CLIENT_TIMEOUT=20 +TIDB_ON_QDRANT_GRPC_ENABLED=false +TIDB_ON_QDRANT_GRPC_PORT=6334 +TIDB_PUBLIC_KEY=dify +TIDB_PRIVATE_KEY=dify +RELYT_HOST=db +RELYT_PORT=5432 +RELYT_USER=postgres +VIKINGDB_ACCESS_KEY=your-ak +VIKINGDB_SECRET_KEY=your-sk +VIKINGDB_REGION=cn-shanghai +VIKINGDB_HOST=api-vikingdb.xxx.volces.com +VIKINGDB_SCHEME=http +VIKINGDB_CONNECTION_TIMEOUT=30 +VIKINGDB_SOCKET_TIMEOUT=30 +TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com +TABLESTORE_INSTANCE_NAME=instance-name +CLICKZETTA_USERNAME= +CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance +COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql} +EXPOSE_NGINX_PORT=80 +EXPOSE_NGINX_SSL_PORT=443 +POSITION_TOOL_PINS= +POSITION_TOOL_INCLUDES= +POSITION_TOOL_EXCLUDES= +POSITION_PROVIDER_PINS= +POSITION_PROVIDER_INCLUDES= +POSITION_PROVIDER_EXCLUDES= +CREATE_TIDB_SERVICE_JOB_ENABLED=false +MAX_SUBMIT_COUNT=100 + +# Vector Store Configuration +STORAGE_TYPE=opendal +VECTOR_STORE=weaviate +VECTOR_INDEX_NAME_PREFIX=Vector_index +WEAVIATE_ENDPOINT=http://weaviate:8080 +WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_TOKENIZATION=word +OCEANBASE_VECTOR_HOST=oceanbase +OCEANBASE_VECTOR_PORT=2881 
+OCEANBASE_VECTOR_USER=root@test +OCEANBASE_VECTOR_PASSWORD=difyai123456 +OCEANBASE_VECTOR_DATABASE=test +OCEANBASE_ENABLE_HYBRID_SEARCH=false +OCEANBASE_FULLTEXT_PARSER=ik +SEEKDB_MEMORY_LIMIT=2G +QDRANT_URL=http://qdrant:6333 +QDRANT_API_KEY=difyai123456 +QDRANT_CLIENT_TIMEOUT=20 +QDRANT_GRPC_ENABLED=false +QDRANT_GRPC_PORT=6334 +QDRANT_REPLICATION_FACTOR=1 +MILVUS_URI=http://host.docker.internal:19530 +MILVUS_TOKEN= +MILVUS_USER= +MILVUS_PASSWORD= +MILVUS_ANALYZER_PARAMS= +PGVECTOR_HOST=pgvector +PGVECTOR_PORT=5432 +PGVECTOR_USER=postgres +PGVECTOR_PASSWORD=difyai123456 +PGVECTOR_DATABASE=dify +PGVECTOR_MIN_CONNECTION=1 +PGVECTOR_MAX_CONNECTION=5 +PGVECTOR_PG_BIGM=false +PGVECTOR_PG_BIGM_VERSION=1.2-20240606 + +# Hologres Configuration +HOLOGRES_HOST= +HOLOGRES_PORT=80 +HOLOGRES_DATABASE= +HOLOGRES_ACCESS_KEY_ID= +HOLOGRES_ACCESS_KEY_SECRET= +HOLOGRES_SCHEMA=public +HOLOGRES_TOKENIZER=jieba +HOLOGRES_DISTANCE_METHOD=Cosine +HOLOGRES_BASE_QUANTIZATION_TYPE=rabitq +HOLOGRES_MAX_DEGREE=64 +HOLOGRES_EF_CONSTRUCTION=400 + +# Milvus API Configuration +MILVUS_DATABASE= +MILVUS_ENABLE_HYBRID_SEARCH=False + +# Human Input Task Configuration +ENABLE_HUMAN_INPUT_TIMEOUT_TASK=true +HUMAN_INPUT_TIMEOUT_TASK_INTERVAL=1 diff --git a/docker/envs/core-services/web.env.example b/docker/envs/core-services/web.env.example new file mode 100644 index 0000000000..d366cd87ba --- /dev/null +++ b/docker/envs/core-services/web.env.example @@ -0,0 +1,30 @@ +# ------------------------------ +# Web Configuration +# ------------------------------ + +CONSOLE_API_URL= +APP_API_URL= +SENTRY_DSN= +NEXT_PUBLIC_SOCKET_URL=ws://localhost +EXPERIMENTAL_ENABLE_VINEXT=false +LOOP_NODE_MAX_COUNT=100 +MAX_TOOLS_NUM=10 +MAX_PARALLEL_LIMIT=10 +MAX_ITERATIONS_NUM=99 +TEXT_GENERATION_TIMEOUT_MS=60000 +ALLOW_INLINE_STYLES=false +ALLOW_UNSAFE_DATA_SCHEME=false +MAX_TREE_DEPTH=50 +MARKETPLACE_ENABLED=true +MARKETPLACE_API_URL=https://marketplace.dify.ai +INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 
+ALLOW_EMBED=false +AMPLITUDE_API_KEY= +ENABLE_WEBSITE_JINAREADER=true +ENABLE_WEBSITE_FIRECRAWL=true +ENABLE_WEBSITE_WATERCRAWL=true +NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false +NEXT_PUBLIC_COOKIE_DOMAIN= +NEXT_PUBLIC_BATCH_CONCURRENCY=5 +CSP_WHITELIST= +TOP_K_MAX_VALUE=10 diff --git a/docker/envs/core-services/worker-beat.env.example b/docker/envs/core-services/worker-beat.env.example new file mode 100644 index 0000000000..380fe02b68 --- /dev/null +++ b/docker/envs/core-services/worker-beat.env.example @@ -0,0 +1,8 @@ +# ------------------------------ +# Worker Beat Configuration +# ------------------------------ + +MODE=beat +COMPOSE_WORKER_HEALTHCHECK_DISABLED=true +COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s +COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s diff --git a/docker/envs/core-services/worker.env.example b/docker/envs/core-services/worker.env.example new file mode 100644 index 0000000000..58cf4ea901 --- /dev/null +++ b/docker/envs/core-services/worker.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Worker Configuration +# ------------------------------ + +MODE=worker +SENTRY_DSN= +SENTRY_TRACES_SAMPLE_RATE=1.0 +SENTRY_PROFILES_SAMPLE_RATE=1.0 +PLUGIN_MAX_PACKAGE_SIZE=52428800 +INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 +COMPOSE_WORKER_HEALTHCHECK_DISABLED=true +COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s +COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s diff --git a/docker/envs/databases/db-mysql.env.example b/docker/envs/databases/db-mysql.env.example new file mode 100644 index 0000000000..b3ea6801fe --- /dev/null +++ b/docker/envs/databases/db-mysql.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Db Mysql Configuration +# ------------------------------ + +MYSQL_INNODB_LOG_FILE_SIZE=128M +MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2 +MYSQL_MAX_CONNECTIONS=1000 +MYSQL_INNODB_BUFFER_POOL_SIZE=512M +MYSQL_HOST_VOLUME=./volumes/mysql/data diff --git a/docker/envs/databases/db-postgres.env.example 
b/docker/envs/databases/db-postgres.env.example new file mode 100644 index 0000000000..14cefb6bee --- /dev/null +++ b/docker/envs/databases/db-postgres.env.example @@ -0,0 +1,26 @@ +# ------------------------------ +# Db Postgres Configuration +# ------------------------------ + +PGDATA=/var/lib/postgresql/data/pgdata +DB_TYPE=postgresql +DB_USERNAME=postgres +DB_PASSWORD=difyai123456 +DB_HOST=db_postgres +DB_PORT=5432 +DB_DATABASE=dify +SQLALCHEMY_POOL_SIZE=30 +SQLALCHEMY_MAX_OVERFLOW=10 +SQLALCHEMY_POOL_RECYCLE=3600 +SQLALCHEMY_ECHO=false +SQLALCHEMY_POOL_PRE_PING=false +SQLALCHEMY_POOL_USE_LIFO=false +SQLALCHEMY_POOL_TIMEOUT=30 +SQLALCHEMY_POOL_RESET_ON_RETURN=rollback +POSTGRES_MAX_CONNECTIONS=100 +POSTGRES_SHARED_BUFFERS=128MB +POSTGRES_WORK_MEM=4MB +POSTGRES_MAINTENANCE_WORK_MEM=64MB +POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB +POSTGRES_STATEMENT_TIMEOUT=0 +POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0 diff --git a/docker/envs/databases/redis.env.example b/docker/envs/databases/redis.env.example new file mode 100644 index 0000000000..74bcb6525e --- /dev/null +++ b/docker/envs/databases/redis.env.example @@ -0,0 +1,35 @@ +# ------------------------------ +# Redis Configuration +# ------------------------------ + +REDIS_HOST=redis +REDIS_PORT=6379 +REDIS_USERNAME= +REDIS_PASSWORD=difyai123456 +REDIS_USE_SSL=false +REDIS_SSL_CERT_REQS=CERT_NONE +REDIS_SSL_CA_CERTS= +REDIS_SSL_CERTFILE= +REDIS_SSL_KEYFILE= +REDIS_DB=0 +REDIS_KEY_PREFIX= +REDIS_MAX_CONNECTIONS= +REDIS_USE_SENTINEL=false +REDIS_SENTINELS= +REDIS_SENTINEL_SERVICE_NAME= +REDIS_SENTINEL_USERNAME= +REDIS_SENTINEL_PASSWORD= +REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 +REDIS_USE_CLUSTERS=false +REDIS_CLUSTERS= +REDIS_CLUSTERS_PASSWORD= +REDIS_RETRY_RETRIES=3 +REDIS_RETRY_BACKOFF_BASE=1.0 +REDIS_RETRY_BACKOFF_CAP=10.0 +REDIS_SOCKET_TIMEOUT=5.0 +REDIS_SOCKET_CONNECT_TIMEOUT=5.0 +REDIS_HEALTH_CHECK_INTERVAL=30 +EVENT_BUS_REDIS_URL= +EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub +EVENT_BUS_REDIS_USE_CLUSTERS=false 
+BROKER_USE_SSL=false diff --git a/docker/envs/infrastructure/certbot.env.example b/docker/envs/infrastructure/certbot.env.example new file mode 100644 index 0000000000..c654fbe02f --- /dev/null +++ b/docker/envs/infrastructure/certbot.env.example @@ -0,0 +1,7 @@ +# ------------------------------ +# Certbot Configuration +# ------------------------------ + +CERTBOT_EMAIL=your_email@example.com +CERTBOT_DOMAIN=your_domain.com +CERTBOT_OPTIONS= diff --git a/docker/envs/infrastructure/etcd.env.example b/docker/envs/infrastructure/etcd.env.example new file mode 100644 index 0000000000..4dca26671a --- /dev/null +++ b/docker/envs/infrastructure/etcd.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Etcd Configuration +# ------------------------------ + diff --git a/docker/envs/infrastructure/milvus-standalone.env.example b/docker/envs/infrastructure/milvus-standalone.env.example new file mode 100644 index 0000000000..7e87ed2648 --- /dev/null +++ b/docker/envs/infrastructure/milvus-standalone.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Milvus Standalone Configuration +# ------------------------------ + diff --git a/docker/envs/infrastructure/minio.env.example b/docker/envs/infrastructure/minio.env.example new file mode 100644 index 0000000000..7c8e1fa35a --- /dev/null +++ b/docker/envs/infrastructure/minio.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Minio Configuration +# ------------------------------ + diff --git a/docker/envs/infrastructure/nginx.env.example b/docker/envs/infrastructure/nginx.env.example new file mode 100644 index 0000000000..fbe86680ba --- /dev/null +++ b/docker/envs/infrastructure/nginx.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Nginx Configuration +# ------------------------------ + +NGINX_SERVER_NAME=_ +NGINX_HTTPS_ENABLED=false +NGINX_PORT=80 +NGINX_SSL_PORT=443 +NGINX_SSL_CERT_FILENAME=dify.crt +NGINX_SSL_CERT_KEY_FILENAME=dify.key +NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3 
+NGINX_WORKER_PROCESSES=auto +NGINX_CLIENT_MAX_BODY_SIZE=100M +NGINX_KEEPALIVE_TIMEOUT=65 +NGINX_PROXY_READ_TIMEOUT=3600s +NGINX_PROXY_SEND_TIMEOUT=3600s +NGINX_ENABLE_CERTBOT_CHALLENGE=false diff --git a/docker/envs/infrastructure/ssrf-proxy.env.example b/docker/envs/infrastructure/ssrf-proxy.env.example new file mode 100644 index 0000000000..210a782494 --- /dev/null +++ b/docker/envs/infrastructure/ssrf-proxy.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Ssrf Proxy Configuration +# ------------------------------ + +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 +SSRF_POOL_MAX_CONNECTIONS=100 +SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +SSRF_POOL_KEEPALIVE_EXPIRY=5.0 diff --git a/docker/middleware.env.example b/docker/envs/middleware.env.example similarity index 100% rename from docker/middleware.env.example rename to docker/envs/middleware.env.example diff --git a/docker/envs/security.env.example b/docker/envs/security.env.example new file mode 100644 index 0000000000..787aef2706 --- /dev/null +++ b/docker/envs/security.env.example @@ -0,0 +1,40 @@ +# ------------------------------ +# Security Configuration +# ------------------------------ + +TIDB_ON_QDRANT_API_KEY=dify +TENCENT_VECTOR_DB_API_KEY=dify +ALIBABACLOUD_MYSQL_PASSWORD=difyai123456 +RELYT_PASSWORD=difyai123456 +LINDORM_PASSWORD=admin +HUAWEI_CLOUD_PASSWORD=admin +UPSTASH_VECTOR_TOKEN=dify +TABLESTORE_ACCESS_KEY_ID=xxx +TABLESTORE_ACCESS_KEY_SECRET=xxx +UNSTRUCTURED_API_KEY= +PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false +NOTION_CLIENT_SECRET= +NOTION_INTERNAL_SECRET= +RESEND_API_KEY=your-resend-api-key +SMTP_PASSWORD= +SENDGRID_API_KEY= +RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 +EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5 
+CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5 +OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5 +CODE_EXECUTION_API_KEY=dify-sandbox +ALIYUN_SLS_ACCESS_KEY_ID= +ALIYUN_SLS_ACCESS_KEY_SECRET= +OTLP_API_KEY= +BAIDU_VECTOR_DB_API_KEY=dify +ANALYTICDB_KEY_ID=your-ak +ANALYTICDB_KEY_SECRET=your-sk +ANALYTICDB_PASSWORD=testpassword +ANALYTICDB_NAMESPACE_PASSWORD=difypassword +TIDB_VECTOR_PASSWORD= +TIDB_PUBLIC_KEY=dify +TIDB_PRIVATE_KEY=dify +VIKINGDB_ACCESS_KEY=your-ak +VIKINGDB_SECRET_KEY=your-sk +SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U +INIT_PASSWORD= diff --git a/docker/envs/vectorstores/chroma.env.example b/docker/envs/vectorstores/chroma.env.example new file mode 100644 index 0000000000..2a15375a3d --- /dev/null +++ b/docker/envs/vectorstores/chroma.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Chroma Configuration +# ------------------------------ + +CHROMA_DATABASE=default_database +CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider +CHROMA_AUTH_CREDENTIALS= +CHROMA_HOST=127.0.0.1 +CHROMA_PORT=8000 +CHROMA_TENANT=default_tenant +CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456 +CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider +CHROMA_IS_PERSISTENT=TRUE diff --git a/docker/envs/vectorstores/couchbase.env.example b/docker/envs/vectorstores/couchbase.env.example new file mode 100644 index 0000000000..4329d9c723 --- /dev/null +++ b/docker/envs/vectorstores/couchbase.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Couchbase Configuration +# ------------------------------ + +COUCHBASE_PASSWORD=password +COUCHBASE_BUCKET_NAME=Embeddings +COUCHBASE_SCOPE_NAME=_default +COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server +COUCHBASE_USER=Administrator diff --git a/docker/envs/vectorstores/elasticsearch.env.example b/docker/envs/vectorstores/elasticsearch.env.example new file mode 100644 index 0000000000..2aaa965cd7 --- /dev/null +++ 
b/docker/envs/vectorstores/elasticsearch.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Elasticsearch Configuration +# ------------------------------ + +ELASTICSEARCH_CLOUD_URL=YOUR-ELASTICSEARCH_CLOUD_URL +ELASTICSEARCH_PASSWORD=elastic +KIBANA_PORT=5601 +ELASTICSEARCH_USE_CLOUD=false +ELASTICSEARCH_API_KEY=YOUR-ELASTICSEARCH_API_KEY +ELASTICSEARCH_VERIFY_CERTS=False +ELASTICSEARCH_CA_CERTS= +ELASTICSEARCH_REQUEST_TIMEOUT=100000 +ELASTICSEARCH_RETRY_ON_TIMEOUT=True +ELASTICSEARCH_MAX_RETRIES=10 +ELASTICSEARCH_HOST=0.0.0.0 +ELASTICSEARCH_PORT=9200 +ELASTICSEARCH_USERNAME=elastic diff --git a/docker/envs/vectorstores/iris.env.example b/docker/envs/vectorstores/iris.env.example new file mode 100644 index 0000000000..b1eb39bff8 --- /dev/null +++ b/docker/envs/vectorstores/iris.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Iris Configuration +# ------------------------------ + +IRIS_CONNECTION_URL= +IRIS_MIN_CONNECTION=1 +IRIS_MAX_CONNECTION=3 +IRIS_TEXT_INDEX=true +IRIS_TEXT_INDEX_LANGUAGE=en +IRIS_TIMEZONE=UTC +IRIS_PASSWORD=Dify@1234 +IRIS_DATABASE=USER +IRIS_SCHEMA=dify +IRIS_HOST=iris +IRIS_SUPER_SERVER_PORT=1972 +IRIS_WEB_SERVER_PORT=52773 +IRIS_USER=_SYSTEM diff --git a/docker/envs/vectorstores/matrixone.env.example b/docker/envs/vectorstores/matrixone.env.example new file mode 100644 index 0000000000..931375f8b4 --- /dev/null +++ b/docker/envs/vectorstores/matrixone.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Matrixone Configuration +# ------------------------------ + +MATRIXONE_PASSWORD=111 +MATRIXONE_HOST=matrixone +MATRIXONE_PORT=6001 +MATRIXONE_USER=dump +MATRIXONE_DATABASE=dify diff --git a/docker/envs/vectorstores/milvus.env.example b/docker/envs/vectorstores/milvus.env.example new file mode 100644 index 0000000000..d16879ca7b --- /dev/null +++ b/docker/envs/vectorstores/milvus.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Milvus Configuration +# ------------------------------ 
+ +MINIO_ACCESS_KEY=minioadmin +MINIO_SECRET_KEY=minioadmin +ETCD_ENDPOINTS=etcd:2379 +MINIO_ADDRESS=minio:9000 +ETCD_AUTO_COMPACTION_MODE=revision +ETCD_AUTO_COMPACTION_RETENTION=1000 +ETCD_QUOTA_BACKEND_BYTES=4294967296 +ETCD_SNAPSHOT_COUNT=50000 +MILVUS_AUTHORIZATION_ENABLED=true diff --git a/docker/envs/vectorstores/myscale.env.example b/docker/envs/vectorstores/myscale.env.example new file mode 100644 index 0000000000..eaa9e88cc0 --- /dev/null +++ b/docker/envs/vectorstores/myscale.env.example @@ -0,0 +1,10 @@ +# ------------------------------ +# Myscale Configuration +# ------------------------------ + +MYSCALE_PASSWORD= +MYSCALE_DATABASE=dify +MYSCALE_FTS_PARAMS= +MYSCALE_HOST=myscale +MYSCALE_PORT=8123 +MYSCALE_USER=default diff --git a/docker/envs/vectorstores/oceanbase.env.example b/docker/envs/vectorstores/oceanbase.env.example new file mode 100644 index 0000000000..42bed8df6a --- /dev/null +++ b/docker/envs/vectorstores/oceanbase.env.example @@ -0,0 +1,6 @@ +# ------------------------------ +# Oceanbase Configuration +# ------------------------------ + +OCEANBASE_CLUSTER_NAME=difyai +OCEANBASE_MEMORY_LIMIT=6G diff --git a/docker/envs/vectorstores/opengauss.env.example b/docker/envs/vectorstores/opengauss.env.example new file mode 100644 index 0000000000..9f58499b64 --- /dev/null +++ b/docker/envs/vectorstores/opengauss.env.example @@ -0,0 +1,12 @@ +# ------------------------------ +# Opengauss Configuration +# ------------------------------ + +OPENGAUSS_PASSWORD=Dify@123 +OPENGAUSS_DATABASE=dify +OPENGAUSS_MIN_CONNECTION=1 +OPENGAUSS_MAX_CONNECTION=5 +OPENGAUSS_ENABLE_PQ=false +OPENGAUSS_HOST=opengauss +OPENGAUSS_PORT=6600 +OPENGAUSS_USER=postgres diff --git a/docker/envs/vectorstores/opensearch.env.example b/docker/envs/vectorstores/opensearch.env.example new file mode 100644 index 0000000000..a6a9283378 --- /dev/null +++ b/docker/envs/vectorstores/opensearch.env.example @@ -0,0 +1,22 @@ +# ------------------------------ +# Opensearch Configuration +# 
------------------------------ + +OPENSEARCH_PASSWORD=admin +OPENSEARCH_AWS_REGION=ap-southeast-1 +OPENSEARCH_AWS_SERVICE=aoss +OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 +OPENSEARCH_MEMLOCK_SOFT=-1 +OPENSEARCH_MEMLOCK_HARD=-1 +OPENSEARCH_NOFILE_SOFT=65536 +OPENSEARCH_NOFILE_HARD=65536 +OPENSEARCH_HOST=opensearch +OPENSEARCH_PORT=9200 +OPENSEARCH_SECURE=true +OPENSEARCH_VERIFY_CERTS=true +OPENSEARCH_AUTH_METHOD=basic +OPENSEARCH_USER=admin +OPENSEARCH_DISCOVERY_TYPE=single-node +OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true +OPENSEARCH_JAVA_OPTS_MIN=512m +OPENSEARCH_JAVA_OPTS_MAX=1024m diff --git a/docker/envs/vectorstores/oracle.env.example b/docker/envs/vectorstores/oracle.env.example new file mode 100644 index 0000000000..c8f24db41a --- /dev/null +++ b/docker/envs/vectorstores/oracle.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Oracle Configuration +# ------------------------------ + +ORACLE_PASSWORD=dify +ORACLE_DSN=oracle:1521/FREEPDB1 +ORACLE_CONFIG_DIR=/app/api/storage/wallet +ORACLE_WALLET_LOCATION=/app/api/storage/wallet +ORACLE_WALLET_PASSWORD=dify +ORACLE_IS_AUTONOMOUS=false +ORACLE_USER=dify +ORACLE_PWD=Dify123456 +ORACLE_CHARACTERSET=AL32UTF8 diff --git a/docker/envs/vectorstores/pgvecto-rs.env.example b/docker/envs/vectorstores/pgvecto-rs.env.example new file mode 100644 index 0000000000..6428e5dd67 --- /dev/null +++ b/docker/envs/vectorstores/pgvecto-rs.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Pgvecto Rs Configuration +# ------------------------------ + +PGVECTO_RS_HOST=pgvecto-rs +PGVECTO_RS_PORT=5432 +PGVECTO_RS_USER=postgres +PGVECTO_RS_PASSWORD=difyai123456 +PGVECTO_RS_DATABASE=dify diff --git a/docker/envs/vectorstores/pgvector.env.example b/docker/envs/vectorstores/pgvector.env.example new file mode 100644 index 0000000000..9fd1dbf962 --- /dev/null +++ b/docker/envs/vectorstores/pgvector.env.example @@ -0,0 +1,8 @@ +# ------------------------------ +# Pgvector Configuration +# 
------------------------------ + +PGVECTOR_PGUSER=postgres +PGVECTOR_POSTGRES_PASSWORD=difyai123456 +PGVECTOR_POSTGRES_DB=dify +PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata diff --git a/docker/envs/vectorstores/qdrant.env.example b/docker/envs/vectorstores/qdrant.env.example new file mode 100644 index 0000000000..a3555fe547 --- /dev/null +++ b/docker/envs/vectorstores/qdrant.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Qdrant Configuration +# ------------------------------ + diff --git a/docker/envs/vectorstores/seekdb.env.example b/docker/envs/vectorstores/seekdb.env.example new file mode 100644 index 0000000000..4307fbede2 --- /dev/null +++ b/docker/envs/vectorstores/seekdb.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Seekdb Configuration +# ------------------------------ + diff --git a/docker/envs/vectorstores/vastbase.env.example b/docker/envs/vectorstores/vastbase.env.example new file mode 100644 index 0000000000..2c9db50fbe --- /dev/null +++ b/docker/envs/vectorstores/vastbase.env.example @@ -0,0 +1,11 @@ +# ------------------------------ +# Vastbase Configuration +# ------------------------------ + +VASTBASE_PASSWORD=Difyai123456 +VASTBASE_DATABASE=dify +VASTBASE_MIN_CONNECTION=1 +VASTBASE_MAX_CONNECTION=5 +VASTBASE_HOST=vastbase +VASTBASE_PORT=5432 +VASTBASE_USER=dify diff --git a/docker/envs/vectorstores/weaviate.env.example b/docker/envs/vectorstores/weaviate.env.example new file mode 100644 index 0000000000..82a3ccb172 --- /dev/null +++ b/docker/envs/vectorstores/weaviate.env.example @@ -0,0 +1,18 @@ +# ------------------------------ +# Weaviate Configuration +# ------------------------------ + +WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate +WEAVIATE_QUERY_DEFAULTS_LIMIT=25 +WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true +WEAVIATE_DEFAULT_VECTORIZER_MODULE=none +WEAVIATE_CLUSTER_HOSTNAME=node1 +WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true 
+WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai +WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true +WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai +WEAVIATE_DISABLE_TELEMETRY=false +WEAVIATE_ENABLE_TOKENIZER_GSE=false +WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA=false +WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR=false diff --git a/docker/generate_docker_compose b/docker/generate_docker_compose index 46d948f3c1..580091e006 100755 --- a/docker/generate_docker_compose +++ b/docker/generate_docker_compose @@ -64,25 +64,61 @@ def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"): return "\n".join(lines) -def insert_shared_env(template_path, output_path, shared_env_block, header_comments): +def create_env_files_from_example(env_example_path): """ - Inserts the shared environment variables block and header comments into the template file, - removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file. - Always writes with LF line endings. + Creates actual env files from .env.example by copying the categorized .env.example files. + This allows docker-compose to use env_file references. + Supports per-module structure with subdirectories. 
+ """ + base_dir = os.path.dirname(os.path.abspath(env_example_path)) + root_env_file = os.path.join(base_dir, ".env") + if not os.path.exists(root_env_file): + with open(env_example_path, "r", encoding="utf-8") as src, open( + root_env_file, "w", encoding="utf-8", newline="\n" + ) as dst: + dst.write(src.read()) + print(f"Created {root_env_file}") + else: + print(f"{root_env_file} already exists, skipping") + + envs_dir = os.path.join(base_dir, "envs") + if not os.path.isdir(envs_dir): + print(f"No envs directory found at {envs_dir}, skipping split env files") + return [] + + created_files = [] + # Walk through all .env.example files in subdirectories + for root, dirs, files in os.walk(envs_dir): + for file in files: + if file.endswith('.env.example'): + example_file = os.path.join(root, file) + env_file = example_file.replace('.env.example', '.env') + + if os.path.exists(env_file): + print(f"{env_file} already exists, skipping") + continue + + # Copy .example to actual file + with open(example_file, "r", encoding="utf-8") as src, open( + env_file, "w", encoding="utf-8", newline="\n" + ) as dst: + dst.write(src.read()) + created_files.append(env_file) + print(f"Created {env_file}") + + return created_files + + +def insert_shared_env(template_path, output_path, header_comments): + """ + Copies the template file to output path with header comments. + The template now uses env_file references instead of a huge YAML anchor. 
""" with open(template_path, "r", encoding="utf-8") as f: template_content = f.read() - # Remove existing x-shared-env: &shared-api-worker-env lines - template_content = re.sub( - r"^x-shared-env: &shared-api-worker-env\s*\n?", - "", - template_content, - flags=re.MULTILINE, - ) - - # Prepare the final content with header comments and shared env block - final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}" + # Prepare the final content with header comments + final_content = f"{header_comments}\n{template_content}" with open(output_path, "w", encoding="utf-8", newline="\n") as f: f.write(final_content) @@ -90,10 +126,10 @@ def insert_shared_env(template_path, output_path, shared_env_block, header_comme def main(): - env_example_path = ".env.example" - template_path = "docker-compose-template.yaml" - output_path = "docker-compose.yaml" - anchor_name = "shared-api-worker-env" # Can be modified as needed + base_dir = os.path.dirname(os.path.abspath(__file__)) + env_example_path = os.path.join(base_dir, ".env.example") + template_path = os.path.join(base_dir, "docker-compose-template.yaml") + output_path = os.path.join(base_dir, "docker-compose.yaml") # Define header comments to be added at the top of docker-compose.yaml header_comments = ( @@ -110,17 +146,14 @@ def main(): print(f"Error: File {path} does not exist.") sys.exit(1) - # Parse .env.example file - env_vars = parse_env_example(env_example_path) + # Create env files from categorized .env.example files + # These files are used by docker-compose's env_file directive + # This ensures .env files exist even in CI/CD environments + create_env_files_from_example(env_example_path) - if not env_vars: - print("Warning: No environment variables found in .env.example.") - - # Generate shared environment variables block - shared_env_block = generate_shared_env_block(env_vars, anchor_name) - - # Insert shared environment variables block and header comments into the template - 
insert_shared_env(template_path, output_path, shared_env_block, header_comments) + # Copy template to output with header comments + # The template now uses env_file references instead of a huge YAML anchor + insert_shared_env(template_path, output_path, header_comments) if __name__ == "__main__": diff --git a/e2e/scripts/common.ts b/e2e/scripts/common.ts index ea6c897b2d..2964892dd0 100644 --- a/e2e/scripts/common.ts +++ b/e2e/scripts/common.ts @@ -36,7 +36,7 @@ export const webDir = path.join(rootDir, 'web') export const middlewareComposeFile = path.join(dockerDir, 'docker-compose.middleware.yaml') export const middlewareEnvFile = path.join(dockerDir, 'middleware.env') -export const middlewareEnvExampleFile = path.join(dockerDir, 'middleware.env.example') +export const middlewareEnvExampleFile = path.join(dockerDir, 'envs', 'middleware.env.example') export const webEnvLocalFile = path.join(webDir, '.env.local') export const webEnvExampleFile = path.join(webDir, '.env.example') export const apiEnvExampleFile = path.join(apiDir, 'tests', 'integration_tests', '.env.example') diff --git a/eslint-suppressions.json b/eslint-suppressions.json index bbb5cd5af9..cb41ef5f83 100644 --- a/eslint-suppressions.json +++ b/eslint-suppressions.json @@ -155,18 +155,10 @@ } }, "web/app/account/(commonLayout)/account-page/email-change-modal.tsx": { - "erasable-syntax-only/enums": { - "count": 1 - }, "ts/no-explicit-any": { "count": 5 } }, - "web/app/account/(commonLayout)/account-page/index.tsx": { - "ts/no-explicit-any": { - "count": 1 - } - }, "web/app/account/(commonLayout)/delete-account/components/feed-back.tsx": { "no-restricted-imports": { "count": 1 @@ -210,6 +202,11 @@ "count": 1 } }, + "web/app/components/app/annotation/add-annotation-modal/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/app/annotation/batch-add-annotation-modal/index.tsx": { "erasable-syntax-only/enums": { "count": 1 @@ -238,6 +235,11 @@ "count": 1 } }, + 
"web/app/components/app/annotation/edit-annotation-modal/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/app/annotation/header-opts/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -260,6 +262,9 @@ "erasable-syntax-only/enums": { "count": 1 }, + "no-restricted-imports": { + "count": 1 + }, "react/set-state-in-effect": { "count": 5 }, @@ -277,11 +282,6 @@ "count": 4 } }, - "web/app/components/app/app-publisher/index.tsx": { - "ts/no-explicit-any": { - "count": 5 - } - }, "web/app/components/app/app-publisher/version-info-modal.tsx": { "no-restricted-imports": { "count": 1 @@ -323,20 +323,12 @@ "count": 4 } }, - "web/app/components/app/configuration/config-var/config-modal/type-select.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config-var/index.tsx": { "no-restricted-imports": { "count": 1 } }, "web/app/components/app/configuration/config-var/select-var-type.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -360,6 +352,9 @@ } }, "web/app/components/app/configuration/config/agent/agent-tools/setting-built-in-tool.tsx": { + "no-restricted-imports": { + "count": 1 + }, "react-hooks/exhaustive-deps": { "count": 1 }, @@ -371,9 +366,6 @@ } }, "web/app/components/app/configuration/config/assistant-type-picker/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -409,11 +401,6 @@ "count": 1 } }, - "web/app/components/app/configuration/config/automatic/version-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config/code-generator/get-code-generator-res.tsx": { "no-restricted-imports": { "count": 1 @@ -425,6 +412,16 @@ "count": 2 } }, + "web/app/components/app/configuration/configuration-view.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, + "web/app/components/app/configuration/dataset-config/card-item/index.tsx": { + 
"no-restricted-imports": { + "count": 1 + } + }, "web/app/components/app/configuration/dataset-config/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -443,11 +440,6 @@ "count": 1 } }, - "web/app/components/app/configuration/dataset-config/select-dataset/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/dataset-config/settings-modal/index.tsx": { "react/set-state-in-effect": { "count": 2 @@ -560,6 +552,9 @@ } }, "web/app/components/app/log/list.tsx": { + "no-restricted-imports": { + "count": 1 + }, "react/set-state-in-effect": { "count": 6 }, @@ -575,30 +570,6 @@ "count": 2 } }, - "web/app/components/app/overview/customize/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/app/overview/embedded/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "react/set-state-in-effect": { - "count": 1 - } - }, - "web/app/components/app/overview/settings/index.tsx": { - "no-restricted-imports": { - "count": 2 - }, - "react/set-state-in-effect": { - "count": 3 - }, - "regexp/no-unused-capturing-group": { - "count": 1 - } - }, "web/app/components/app/overview/trigger-card.tsx": { "ts/no-explicit-any": { "count": 1 @@ -633,6 +604,9 @@ } }, "web/app/components/app/workflow-log/list.tsx": { + "no-restricted-imports": { + "count": 1 + }, "react/set-state-in-effect": { "count": 2 } @@ -642,25 +616,6 @@ "count": 1 } }, - "web/app/components/apps/app-card.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "react/set-state-in-effect": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 2 - } - }, - "web/app/components/apps/list.tsx": { - "react-hooks/exhaustive-deps": { - "count": 1 - }, - "react/unsupported-syntax": { - "count": 2 - } - }, "web/app/components/apps/new-app-card.tsx": { "react-hooks-extra/no-direct-set-state-in-use-effect": { "count": 1 @@ -830,11 +785,6 @@ "count": 1 } }, - "web/app/components/base/chat/chat-with-history/sidebar/rename-modal.tsx": { - 
"no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/chat/chat/answer/agent-content.tsx": { "style/multiline-ternary": { "count": 2 @@ -856,11 +806,6 @@ "count": 1 } }, - "web/app/components/base/chat/chat/answer/operation.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, "web/app/components/base/chat/chat/answer/workflow-process.tsx": { "react/set-state-in-effect": { "count": 1 @@ -986,6 +931,11 @@ "count": 1 } }, + "web/app/components/base/drawer-plus/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/base/emoji-picker/index.tsx": { "no-restricted-imports": { "count": 1 @@ -1111,12 +1061,9 @@ "count": 3 } }, - "web/app/components/base/form/components/base/base-field.tsx": { + "web/app/components/base/float-right-container/index.tsx": { "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 3 + "count": 2 } }, "web/app/components/base/form/components/base/base-form.tsx": { @@ -1323,7 +1270,7 @@ }, "web/app/components/base/icons/src/vender/line/development/index.ts": { "no-barrel-files/no-barrel-files": { - "count": 2 + "count": 1 } }, "web/app/components/base/icons/src/vender/line/editor/index.ts": { @@ -1645,14 +1592,6 @@ "count": 1 } }, - "web/app/components/base/modal/modal.stories.tsx": { - "no-console": { - "count": 4 - }, - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/components/base/new-audio-button/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -1877,26 +1816,6 @@ "count": 1 } }, - "web/app/components/base/tag-management/__tests__/panel.spec.tsx": { - "ts/no-explicit-any": { - "count": 2 - } - }, - "web/app/components/base/tag-management/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/base/tag-management/tag-item-editor.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/base/tag-management/tag-remove-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, 
"web/app/components/base/text-generation/hooks.ts": { "ts/no-explicit-any": { "count": 1 @@ -1974,11 +1893,6 @@ "count": 4 } }, - "web/app/components/billing/plan/index.tsx": { - "ts/no-explicit-any": { - "count": 2 - } - }, "web/app/components/billing/pricing/assets/index.tsx": { "no-barrel-files/no-barrel-files": { "count": 12 @@ -2197,11 +2111,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/create-from-pipeline/data-source/base/credential-selector/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/create-from-pipeline/data-source/online-documents/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -2272,14 +2181,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/batch-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/completed/common/chunk-content.tsx": { "react/set-state-in-effect": { "count": 1 @@ -2290,11 +2191,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/completed/components/index.ts": { - "no-barrel-files/no-barrel-files": { - "count": 3 - } - }, "web/app/components/datasets/documents/detail/completed/components/segment-list-content.tsx": { "ts/no-non-null-asserted-optional-chain": { "count": 1 @@ -2359,14 +2255,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/segment-add/index.tsx": { - "erasable-syntax-only/enums": { - "count": 1 - }, - "react-refresh/only-export-components": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/settings/pipeline-settings/index.tsx": { "ts/no-explicit-any": { "count": 6 @@ -2408,12 +2296,10 @@ } }, "web/app/components/datasets/hit-testing/index.tsx": { - "react/unsupported-syntax": { + "no-restricted-imports": { "count": 1 - } - }, - "web/app/components/datasets/list/dataset-card/hooks/use-dataset-card-state.ts": { - "react/set-state-in-effect": { + }, + 
"react/unsupported-syntax": { "count": 1 } }, @@ -2452,7 +2338,7 @@ }, "web/app/components/datasets/metadata/metadata-dataset/dataset-metadata-drawer.tsx": { "no-restricted-imports": { - "count": 2 + "count": 3 } }, "web/app/components/datasets/metadata/metadata-dataset/select-metadata-modal.tsx": { @@ -2475,11 +2361,6 @@ "count": 1 } }, - "web/app/components/datasets/settings/index-method/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/develop/code.tsx": { "ts/no-empty-object-type": { "count": 1 @@ -2522,17 +2403,6 @@ "count": 2 } }, - "web/app/components/explore/create-app-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 1 - }, - "unicorn/prefer-number-properties": { - "count": 1 - } - }, "web/app/components/explore/item-operation/index.tsx": { "react/set-state-in-effect": { "count": 1 @@ -2676,11 +2546,8 @@ "erasable-syntax-only/enums": { "count": 1 }, - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { - "count": 3 + "count": 2 } }, "web/app/components/header/account-setting/model-provider-page/declarations.ts": { @@ -2709,11 +2576,6 @@ "count": 1 } }, - "web/app/components/header/account-setting/model-provider-page/model-auth/credential-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/header/account-setting/model-provider-page/model-auth/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 6 @@ -2747,14 +2609,6 @@ "count": 3 } }, - "web/app/components/header/account-setting/model-provider-page/model-modal/Form.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 6 - } - }, "web/app/components/header/account-setting/model-provider-page/model-modal/Input.tsx": { "unicorn/prefer-number-properties": { "count": 2 @@ -2824,14 +2678,6 @@ "count": 4 } }, - "web/app/components/header/app-nav/index.tsx": { - "react/set-state-in-effect": { - "count": 2 - }, - 
"ts/no-explicit-any": { - "count": 1 - } - }, "web/app/components/header/header-wrapper.tsx": { "ts/no-explicit-any": { "count": 1 @@ -2986,10 +2832,18 @@ } }, "web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 7 } }, + "web/app/components/plugins/plugin-detail-panel/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/plugins/plugin-detail-panel/model-list.tsx": { "ts/no-explicit-any": { "count": 1 @@ -3011,6 +2865,9 @@ } }, "web/app/components/plugins/plugin-detail-panel/strategy-detail.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 2 } @@ -3025,44 +2882,11 @@ "count": 1 } }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/create/hooks/use-oauth-client-state.ts": { - "erasable-syntax-only/enums": { - "count": 2 - } - }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/create/index.tsx": { - "no-barrel-files/no-barrel-files": { - "count": 3 - } - }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/create/oauth-client.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/subscription-list/create/types.ts": { "erasable-syntax-only/enums": { "count": 1 } }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/edit/apikey-edit-modal.tsx": { - "erasable-syntax-only/enums": { - "count": 1 - }, - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/edit/manual-edit-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/edit/oauth-edit-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/subscription-list/index.tsx": { "no-barrel-files/no-barrel-files": { "count": 2 @@ -3091,11 
+2915,6 @@ "count": 7 } }, - "web/app/components/plugins/plugin-detail-panel/tool-selector/components/schema-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/tool-selector/components/tool-item.tsx": { "no-restricted-imports": { "count": 1 @@ -3107,13 +2926,11 @@ } }, "web/app/components/plugins/plugin-detail-panel/trigger/event-detail-drawer.tsx": { - "ts/no-explicit-any": { - "count": 5 - } - }, - "web/app/components/plugins/plugin-item/action.tsx": { "no-restricted-imports": { "count": 1 + }, + "ts/no-explicit-any": { + "count": 5 } }, "web/app/components/plugins/plugin-item/index.tsx": { @@ -3149,16 +2966,6 @@ "count": 1 } }, - "web/app/components/plugins/readme-panel/index.tsx": { - "react/unsupported-syntax": { - "count": 1 - } - }, - "web/app/components/plugins/readme-panel/store.ts": { - "erasable-syntax-only/enums": { - "count": 1 - } - }, "web/app/components/plugins/reference-setting-modal/auto-update-setting/types.ts": { "erasable-syntax-only/enums": { "count": 2 @@ -3386,7 +3193,7 @@ }, "web/app/components/tools/edit-custom-collection-modal/config-credentials.tsx": { "no-restricted-imports": { - "count": 1 + "count": 2 } }, "web/app/components/tools/edit-custom-collection-modal/get-schema.tsx": { @@ -3395,6 +3202,9 @@ } }, "web/app/components/tools/edit-custom-collection-modal/index.tsx": { + "no-restricted-imports": { + "count": 1 + }, "react/set-state-in-effect": { "count": 4 }, @@ -3403,6 +3213,9 @@ } }, "web/app/components/tools/edit-custom-collection-modal/test-api.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 1 } @@ -3412,6 +3225,11 @@ "count": 1 } }, + "web/app/components/tools/mcp/detail/provider-detail.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/tools/mcp/mcp-server-modal.tsx": { "no-restricted-imports": { "count": 1 @@ -3440,12 +3258,20 @@ "count": 1 } }, + "web/app/components/tools/provider/detail.tsx": { 
+ "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/tools/provider/empty.tsx": { "ts/no-explicit-any": { "count": 1 } }, "web/app/components/tools/setting/build-in/config-credentials.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 3 } @@ -3620,11 +3446,6 @@ "count": 1 } }, - "web/app/components/workflow/dsl-export-confirm-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/header/run-mode.tsx": { "no-console": { "count": 1 @@ -3663,7 +3484,7 @@ }, "web/app/components/workflow/hooks/index.ts": { "no-barrel-files/no-barrel-files": { - "count": 26 + "count": 25 } }, "web/app/components/workflow/hooks/use-checklist.ts": { @@ -3799,11 +3620,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/error-handle/error-handle-type-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/error-handle/types.ts": { "erasable-syntax-only/enums": { "count": 1 @@ -3900,11 +3716,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/variable/var-type-picker.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/variable/variable-label/hooks.ts": { "react/no-unnecessary-use-prefix": { "count": 2 @@ -4224,16 +4035,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/if-else/components/condition-list/condition-operator.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/if-else/components/condition-number-input.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/if-else/default.ts": { "ts/no-explicit-any": { "count": 1 @@ -4269,11 +4070,6 @@ "count": 6 } }, - "web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, 
"web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/hooks.tsx": { "ts/no-explicit-any": { "count": 4 @@ -4307,31 +4103,21 @@ "count": 1 } }, + "web/app/components/workflow/nodes/knowledge-retrieval/components/dataset-item.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/condition-item.tsx": { "ts/no-explicit-any": { "count": 1 } }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/condition-operator.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/condition-value-method.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/index.tsx": { "no-restricted-imports": { "count": 1 } }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/metadata-filter-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/knowledge-retrieval/default.ts": { "ts/no-explicit-any": { "count": 1 @@ -4413,11 +4199,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/visual-editor/edit-card/type-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/visual-editor/hooks.ts": { "ts/no-explicit-any": { "count": 1 @@ -4469,16 +4250,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/loop/components/condition-list/condition-operator.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/loop/components/condition-number-input.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/loop/components/loop-variables/form-item.tsx": { 
"ts/no-explicit-any": { "count": 3 @@ -4650,9 +4421,6 @@ } }, "web/app/components/workflow/nodes/tool/components/tool-form/item.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -5178,11 +4946,6 @@ "count": 5 } }, - "web/app/education-apply/verify-state-modal.tsx": { - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/forgot-password/ForgotPasswordForm.spec.tsx": { "ts/no-explicit-any": { "count": 5 @@ -5404,11 +5167,6 @@ "count": 1 } }, - "web/plugins/dev-proxy/server.spec.ts": { - "ts/no-explicit-any": { - "count": 1 - } - }, "web/scripts/component-analyzer.js": { "regexp/no-unused-capturing-group": { "count": 6 @@ -5462,11 +5220,6 @@ "count": 2 } }, - "web/service/knowledge/use-dataset.ts": { - "@tanstack/query/exhaustive-deps": { - "count": 1 - } - }, "web/service/share.ts": { "erasable-syntax-only/enums": { "count": 1 @@ -5480,11 +5233,6 @@ "count": 2 } }, - "web/service/use-apps.ts": { - "ts/no-explicit-any": { - "count": 1 - } - }, "web/service/use-common.ts": { "ts/no-empty-object-type": { "count": 1 diff --git a/eslint.config.mjs b/eslint.config.mjs index ae9fdaff01..1380ed67d2 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -4,6 +4,17 @@ import antfu, { GLOB_MARKDOWN } from '@antfu/eslint-config' import md from 'eslint-markdown' import markdownPreferences from 'eslint-plugin-markdown-preferences' +const GENERATED_IGNORES = [ + '**/storybook-static/', + '**/.next/', + 'web/next/', + 'web/next-env.d.ts', + '**/dist/', + '**/coverage/', + 'e2e/.auth/', + 'e2e/cucumber-report/', +] + export default antfu( { ignores: original => [ @@ -15,6 +26,7 @@ export default antfu( '!package.json', '!pnpm-workspace.yaml', '!vite.config.ts', + ...GENERATED_IGNORES, ...original, ], typescript: { diff --git a/package.json b/package.json index a563b574f7..9ef6b4ef4e 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,7 @@ "name": "dify", "type": "module", "private": true, - "packageManager": "pnpm@11.0.0", 
+ "packageManager": "pnpm@11.0.8", "engines": { "node": "^22.22.1" }, diff --git a/packages/contracts/generated/api/console/apps/types.gen.ts b/packages/contracts/generated/api/console/apps/types.gen.ts index fe4c10329e..4a4742adcf 100644 --- a/packages/contracts/generated/api/console/apps/types.gen.ts +++ b/packages/contracts/generated/api/console/apps/types.gen.ts @@ -4156,8 +4156,8 @@ export type GetAppsByAppIdWorkflowsDraftVariablesResponse export type DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdData = { body?: never path: { - app_id: string variable_id: string + app_id: string } query?: never url: '/apps/{app_id}/workflows/draft/variables/{variable_id}' @@ -4210,8 +4210,8 @@ export type GetAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse export type PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdData = { body: WorkflowDraftVariableUpdatePayload path: { - app_id: string variable_id: string + app_id: string } query?: never url: '/apps/{app_id}/workflows/draft/variables/{variable_id}' diff --git a/packages/contracts/generated/api/console/apps/zod.gen.ts b/packages/contracts/generated/api/console/apps/zod.gen.ts index dcaeaed246..9798d22cc0 100644 --- a/packages/contracts/generated/api/console/apps/zod.gen.ts +++ b/packages/contracts/generated/api/console/apps/zod.gen.ts @@ -2980,8 +2980,8 @@ export const zGetAppsByAppIdWorkflowsDraftVariablesQuery = z.object({ export const zGetAppsByAppIdWorkflowsDraftVariablesResponse = zWorkflowDraftVariableListWithoutValue export const zDeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdPath = z.object({ - app_id: z.string(), variable_id: z.string(), + app_id: z.string(), }) /** @@ -3006,8 +3006,8 @@ export const zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdBody = zWorkflowDraftVariableUpdatePayload export const zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdPath = z.object({ - app_id: z.string(), variable_id: z.string(), + app_id: z.string(), }) /** diff --git 
a/packages/contracts/generated/api/console/datasets/types.gen.ts b/packages/contracts/generated/api/console/datasets/types.gen.ts index 89a68593b7..61d380d686 100644 --- a/packages/contracts/generated/api/console/datasets/types.gen.ts +++ b/packages/contracts/generated/api/console/datasets/types.gen.ts @@ -255,6 +255,7 @@ export type ProcessRule = { } export type RetrievalModel = { + metadata_filtering_conditions?: MetadataFilteringCondition reranking_enable: boolean reranking_mode?: string | null reranking_model?: RerankingModel @@ -312,6 +313,11 @@ export type Rule = { subchunk_segmentation?: Segmentation } +export type MetadataFilteringCondition = { + conditions?: Array | null + logical_operator?: 'and' | 'or' | null +} + export type RerankingModel = { reranking_model_name?: string | null reranking_provider_name?: string | null @@ -405,6 +411,30 @@ export type Segmentation = { separator?: string } +export type Condition = { + comparison_operator: + | 'contains' + | 'not contains' + | 'start with' + | 'end with' + | 'is' + | 'is not' + | 'empty' + | 'not empty' + | 'in' + | 'not in' + | '=' + | '≠' + | '>' + | '<' + | '≥' + | '≤' + | 'before' + | 'after' + name: string + value?: unknown +} + export type WeightKeywordSetting = { keyword_weight: number } @@ -1174,8 +1204,8 @@ export type PatchDatasetsByDatasetIdDocumentsStatusByActionBatchResponse export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdData = { body?: never path: { - dataset_id: string document_id: string + dataset_id: string } query?: never url: '/datasets/{dataset_id}/documents/{document_id}' diff --git a/packages/contracts/generated/api/console/datasets/zod.gen.ts b/packages/contracts/generated/api/console/datasets/zod.gen.ts index 2ac2cbfd1f..76491c52a0 100644 --- a/packages/contracts/generated/api/console/datasets/zod.gen.ts +++ b/packages/contracts/generated/api/console/datasets/zod.gen.ts @@ -392,6 +392,46 @@ export const zProcessRule = z.object({ rules: zRule.optional(), }) +/** + * 
Condition + * + * Condition detail + */ +export const zCondition = z.object({ + comparison_operator: z.enum([ + 'contains', + 'not contains', + 'start with', + 'end with', + 'is', + 'is not', + 'empty', + 'not empty', + 'in', + 'not in', + '=', + '≠', + '>', + '<', + '≥', + '≤', + 'before', + 'after', + ]), + name: z.string(), + value: z.unknown().optional(), +}) + +/** + * MetadataFilteringCondition + * + * Metadata Filtering Condition. + */ +export const zMetadataFilteringCondition = z.object({ + conditions: z.array(zCondition).nullish(), + logical_operator: z.enum(['and', 'or']).nullish().default('and'), +}) + /** * WeightKeywordSetting */ @@ -421,6 +461,7 @@ export const zWeightModel = z.object({ * RetrievalModel */ export const zRetrievalModel = z.object({ + metadata_filtering_conditions: zMetadataFilteringCondition.optional(), reranking_enable: z.boolean(), reranking_mode: z.string().nullish(), reranking_model: zRerankingModel.optional(), @@ -925,8 +966,8 @@ export const zPatchDatasetsByDatasetIdDocumentsStatusByActionBatchResponse = z.r ) export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdPath = z.object({ - dataset_id: z.string(), document_id: z.string(), + dataset_id: z.string(), }) /** diff --git a/packages/contracts/generated/api/service/types.gen.ts b/packages/contracts/generated/api/service/types.gen.ts index f491c1e3f9..e3791e295c 100644 --- a/packages/contracts/generated/api/service/types.gen.ts +++ b/packages/contracts/generated/api/service/types.gen.ts @@ -325,8 +325,37 @@ export type WorkflowRunResponse = { workflow_id: string } +export type Condition = { + comparison_operator: + | 'contains' + | 'not contains' + | 'start with' + | 'end with' + | 'is' + | 'is not' + | 'empty' + | 'not empty' + | 'in' + | 'not in' + | '=' + | '≠' + | '>' + | '<' + | '≥' + | '≤' + | 'before' + | 'after' + name: string + value?: unknown +} + export type DatasetPermissionEnum = 'only_me' | 'all_team_members' | 'partial_members' +export type 
MetadataFilteringCondition = { + conditions?: Array | null + logical_operator?: 'and' | 'or' | null +} + export type RerankingModel = { reranking_model_name?: string | null reranking_provider_name?: string | null @@ -339,6 +368,7 @@ export type RetrievalMethod | 'keyword_search' export type RetrievalModel = { + metadata_filtering_conditions?: MetadataFilteringCondition reranking_enable: boolean reranking_mode?: string | null reranking_model?: RerankingModel @@ -1833,8 +1863,8 @@ export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdData = body?: never path: { segment_id: string - dataset_id: string document_id: string + dataset_id: string } query?: never url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}' diff --git a/packages/contracts/generated/api/service/zod.gen.ts b/packages/contracts/generated/api/service/zod.gen.ts index 2c2400c0cb..6feacbdead 100644 --- a/packages/contracts/generated/api/service/zod.gen.ts +++ b/packages/contracts/generated/api/service/zod.gen.ts @@ -326,11 +326,51 @@ export const zWorkflowRunResponse = z.object({ workflow_id: z.string(), }) +/** + * Condition + * + * Condition detail + */ +export const zCondition = z.object({ + comparison_operator: z.enum([ + 'contains', + 'not contains', + 'start with', + 'end with', + 'is', + 'is not', + 'empty', + 'not empty', + 'in', + 'not in', + '=', + '≠', + '>', + '<', + '≥', + '≤', + 'before', + 'after', + ]), + name: z.string(), + value: z.unknown().optional(), +}) + /** * DatasetPermissionEnum */ export const zDatasetPermissionEnum = z.enum(['only_me', 'all_team_members', 'partial_members']) +/** + * MetadataFilteringCondition + * + * Metadata Filtering Condition. 
+ */ +export const zMetadataFilteringCondition = z.object({ + conditions: z.array(zCondition).nullish(), + logical_operator: z.enum(['and', 'or']).nullish().default('and'), +}) + /** * RerankingModel */ @@ -378,6 +418,7 @@ export const zWeightModel = z.object({ * RetrievalModel */ export const zRetrievalModel = z.object({ + metadata_filtering_conditions: zMetadataFilteringCondition.optional(), reranking_enable: z.boolean(), reranking_mode: z.string().nullish(), reranking_model: zRerankingModel.optional(), @@ -1082,8 +1123,8 @@ export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdR export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath = z.object({ segment_id: z.string(), - dataset_id: z.string(), document_id: z.string(), + dataset_id: z.string(), }) /** diff --git a/packages/dev-proxy/README.md b/packages/dev-proxy/README.md new file mode 100644 index 0000000000..6b9d7298c4 --- /dev/null +++ b/packages/dev-proxy/README.md @@ -0,0 +1,196 @@ +# @langgenius/dev-proxy + +Generic Hono-based development proxy for frontend projects. The package does not ship any product-specific routes, cookie names, or environment variable conventions. Every proxied path and upstream target is declared in a local config file. + +## Installation + +```bash +pnpm add -D @langgenius/dev-proxy +``` + +Add a script in your frontend project: + +```json +{ + "scripts": { + "dev:proxy": "dev-proxy --config ./dev-proxy.config.ts --env-file ./.env" + } +} +``` + +Run it with: + +```bash +pnpm dev:proxy +``` + +## CLI + +```bash +dev-proxy --config ./dev-proxy.config.ts +``` + +Supported options: + +- `--config`, `-c`: config file path. Defaults to `dev-proxy.config.ts`. +- `--env-file`: load environment variables before evaluating the config file. +- `--host`: override `server.host` from config. +- `--port`: override `server.port` from config. +- `--help`, `-h`: print help. + +`--target` is not supported. 
Put targets in the config file so routes and upstreams stay explicit. + +## Config Shape + +```ts +import { defineDevProxyConfig } from '@langgenius/dev-proxy' + +export default defineDevProxyConfig({ + server: { + host: '127.0.0.1', + port: 5001, + }, + routes: [ + { + paths: '/api', + target: 'https://example.com', + }, + ], + cors: { + allowedOrigins: 'local', + }, +}) +``` + +Config files can be `.ts`, `.mts`, `.js`, or `.mjs`. + +`routes` are matched in declaration order. The first matching route wins. Each configured path matches both the exact path and all child paths, so `paths: '/api'` matches `/api`, `/api/apps`, and `/api/apps/123`. + +By default, credentialed CORS is allowed for local development origins such as `localhost`, `127.0.0.1`, and `::1`. To restrict it to specific origins: + +``` +cors: { + allowedOrigins: ['http://localhost:3000'], +} +``` + +## Scenario 1: Proxy One Local Route Group To An Online Backend + +Use this when a local frontend should call an online backend through one proxy server. For example, the frontend calls `http://127.0.0.1:5001/api/apps`, and the proxy forwards it to `https://cloud.example.com/api/apps`. + +```ts +import { defineDevProxyConfig } from '@langgenius/dev-proxy' + +const target = process.env.DEV_PROXY_TARGET || 'https://cloud.example.com' + +export default defineDevProxyConfig({ + server: { + host: process.env.DEV_PROXY_HOST || '127.0.0.1', + port: Number(process.env.DEV_PROXY_PORT || 5001), + }, + routes: [ + { + paths: '/api', + target, + }, + ], +}) +``` + +Optional `.env`: + +```env +DEV_PROXY_TARGET=https://cloud.example.com +DEV_PROXY_HOST=127.0.0.1 +DEV_PROXY_PORT=5001 +``` + +Command: + +```bash +dev-proxy --config ./dev-proxy.config.ts --env-file ./.env +``` + +## Scenario 2: Proxy Two Route Groups To Two Local Backends + +Use this when one frontend needs to talk to two different local services. 
For example: + +- `/console/api/*` goes to a local console backend at `http://127.0.0.1:5001` +- `/api/*` goes to a local public API backend at `http://127.0.0.1:5002` + +```ts +import { defineDevProxyConfig } from '@langgenius/dev-proxy' + +const consoleApiTarget = process.env.DEV_PROXY_CONSOLE_API_TARGET || 'http://127.0.0.1:5001' +const publicApiTarget = process.env.DEV_PROXY_PUBLIC_API_TARGET || 'http://127.0.0.1:5002' + +export default defineDevProxyConfig({ + server: { + host: process.env.DEV_PROXY_HOST || '127.0.0.1', + port: Number(process.env.DEV_PROXY_PORT || 8082), + }, + routes: [ + { + paths: '/console/api', + target: consoleApiTarget, + }, + { + paths: '/api', + target: publicApiTarget, + }, + ], +}) +``` + +Optional `.env`: + +```env +DEV_PROXY_CONSOLE_API_TARGET=http://127.0.0.1:5001 +DEV_PROXY_PUBLIC_API_TARGET=http://127.0.0.1:5002 +DEV_PROXY_HOST=127.0.0.1 +DEV_PROXY_PORT=8082 +``` + +When two route groups overlap, put the more specific one first: + +```ts +routes: [ + { paths: '/api/enterprise', target: 'http://127.0.0.1:5003' }, + { paths: '/api', target: 'http://127.0.0.1:5002' }, +] +``` + +## Cookie Rewrite + +Cookie rewriting is opt-in and config-driven. The package does not know any application cookie names. + +Use `cookieRewrite` when an upstream uses secure cookie prefixes such as `__Host-` or `__Secure-`, but local development needs cookies to work over `http://localhost`. + +```ts +import type { CookieRewriteOptions } from '@langgenius/dev-proxy' +import { defineDevProxyConfig } from '@langgenius/dev-proxy' + +const cookieRewrite: CookieRewriteOptions = { + hostPrefixCookies: ['access_token', 'refresh_token', /^passport-/], +} + +export default defineDevProxyConfig({ + routes: [ + { + paths: '/api', + target: 'https://cloud.example.com', + cookieRewrite, + }, + ], +}) +``` + +Set `cookieRewrite: false` to disable cookie rewriting for a route. + +## Behavior + +- The proxy preserves the matched path prefix when forwarding requests. 
+- Request bodies are forwarded as streams. +- Hop-by-hop headers are removed before forwarding. +- Local credentialed CORS and preflight requests are handled by the proxy. +- Route matching is explicit and order-sensitive. diff --git a/packages/dev-proxy/bin/dev-proxy.js b/packages/dev-proxy/bin/dev-proxy.js new file mode 100755 index 0000000000..02e37f3525 --- /dev/null +++ b/packages/dev-proxy/bin/dev-proxy.js @@ -0,0 +1,3 @@ +#!/usr/bin/env node + +import '../dist/cli.mjs' diff --git a/packages/dev-proxy/package.json b/packages/dev-proxy/package.json new file mode 100644 index 0000000000..d5524290eb --- /dev/null +++ b/packages/dev-proxy/package.json @@ -0,0 +1,43 @@ +{ + "name": "@langgenius/dev-proxy", + "type": "module", + "version": "0.0.5", + "exports": { + ".": { + "types": "./dist/index.d.mts", + "import": "./dist/index.mjs" + } + }, + "types": "./dist/index.d.mts", + "bin": { + "dev-proxy": "./bin/dev-proxy.js" + }, + "files": [ + "bin", + "dist", + "src" + ], + "engines": { + "node": "^22.22.1" + }, + "scripts": { + "build": "vp pack", + "prepare": "pnpm run build", + "test": "vp test", + "type-check": "tsgo", + "prepublish": "pnpm run build" + }, + "dependencies": { + "@hono/node-server": "catalog:", + "c12": "catalog:", + "hono": "catalog:" + }, + "devDependencies": { + "@dify/tsconfig": "workspace:*", + "@types/node": "catalog:", + "@typescript/native-preview": "catalog:", + "vite": "catalog:", + "vite-plus": "catalog:", + "vitest": "catalog:" + } +} diff --git a/packages/dev-proxy/src/cli.spec.ts b/packages/dev-proxy/src/cli.spec.ts new file mode 100644 index 0000000000..e8a87a0588 --- /dev/null +++ b/packages/dev-proxy/src/cli.spec.ts @@ -0,0 +1,158 @@ +/** + * @vitest-environment node + */ +import type { ChildProcessByStdio } from 'node:child_process' +import type { Readable } from 'node:stream' +import { spawn } from 'node:child_process' +import { once } from 'node:events' +import fs from 'node:fs/promises' +import net from 'node:net' +import os 
from 'node:os' +import path from 'node:path' +import { fileURLToPath } from 'node:url' +import { afterEach, describe, expect, it } from 'vitest' + +const tempDirs: string[] = [] +type DevProxyCliProcess = ChildProcessByStdio + +const childProcesses: DevProxyCliProcess[] = [] +const binPath = fileURLToPath(new URL('../bin/dev-proxy.js', import.meta.url)) + +const createTempDir = async () => { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'dev-proxy-cli-test-')) + tempDirs.push(tempDir) + return tempDir +} + +const getFreePort = async () => { + const server = net.createServer() + await new Promise((resolve, reject) => { + server.once('error', reject) + server.listen(0, '127.0.0.1', resolve) + }) + + const address = server.address() + if (!address || typeof address === 'string') + throw new Error('Failed to allocate a test port.') + + const { port } = address + await new Promise((resolve, reject) => { + server.close((error) => { + if (error) + reject(error) + else + resolve() + }) + }) + + return port +} + +const waitForOutput = ( + child: DevProxyCliProcess, + output: () => string, + expectedOutput: string, +) => new Promise((resolve, reject) => { + let timeout: ReturnType + + function cleanup() { + clearTimeout(timeout) + child.stdout.off('data', onData) + child.stderr.off('data', onData) + child.off('exit', onExit) + } + + function onData() { + if (!output().includes(expectedOutput)) + return + + cleanup() + resolve() + } + + function onExit(code: number | null, signal: NodeJS.Signals | null) { + cleanup() + reject(new Error(`dev-proxy exited before writing "${expectedOutput}" with code ${code} and signal ${signal}. Output:\n${output()}`)) + } + + timeout = setTimeout(() => { + cleanup() + reject(new Error(`Timed out waiting for "${expectedOutput}". 
Output:\n${output()}`)) + }, 3000) + + child.stdout.on('data', onData) + child.stderr.on('data', onData) + child.once('exit', onExit) + onData() +}) + +const spawnCli = (args: readonly string[], cwd: string) => { + const child = spawn(process.execPath, [binPath, ...args], { + cwd, + env: { + ...process.env, + FORCE_COLOR: '0', + }, + stdio: ['ignore', 'pipe', 'pipe'], + }) + childProcesses.push(child) + return child +} + +const stopChildProcess = async (child: DevProxyCliProcess) => { + if (child.exitCode !== null || child.signalCode !== null) + return + + child.kill('SIGTERM') + await once(child, 'exit') +} + +describe('dev proxy CLI', () => { + afterEach(async () => { + await Promise.all(childProcesses.splice(0).map(stopChildProcess)) + await Promise.all(tempDirs.splice(0).map(tempDir => fs.rm(tempDir, { + force: true, + recursive: true, + }))) + }) + + // Scenario: help output should still be a normal short-lived command. + it('should print help and exit', async () => { + // Arrange + const tempDir = await createTempDir() + const child = spawnCli(['--help'], tempDir) + + // Act + const [code] = await once(child, 'exit') + + // Assert + expect(code).toBe(0) + }) + + // Scenario: successful server startup should keep the CLI process alive. 
+ it('should keep running after starting the proxy server', async () => { + // Arrange + const tempDir = await createTempDir() + const port = await getFreePort() + await fs.writeFile(path.join(tempDir, 'dev-proxy.config.ts'), ` + export default { + routes: [{ paths: '/api', target: 'https://api.example.com' }], + } + `) + + let output = '' + const child = spawnCli(['--config', './dev-proxy.config.ts', '--host', '127.0.0.1', '--port', String(port)], tempDir) + child.stdout.on('data', chunk => output += chunk.toString()) + child.stderr.on('data', chunk => output += chunk.toString()) + + // Act + await waitForOutput(child, () => output, `[dev-proxy] listening on http://127.0.0.1:${port}`) + await new Promise(resolve => setTimeout(resolve, 100)) + const response = await fetch(`http://127.0.0.1:${port}/not-proxied`) + + // Assert + expect(child.exitCode).toBeNull() + expect(child.signalCode).toBeNull() + expect(response.status).toBe(404) + }) +}) diff --git a/packages/dev-proxy/src/cli.ts b/packages/dev-proxy/src/cli.ts new file mode 100644 index 0000000000..05234cb359 --- /dev/null +++ b/packages/dev-proxy/src/cli.ts @@ -0,0 +1,56 @@ +import process from 'node:process' +import { serve } from '@hono/node-server' +import { loadDevProxyConfig, parseDevProxyCliArgs, resolveDevProxyServerOptions } from './config' +import { createDevProxyApp } from './server' + +function printUsage() { + console.log(`Usage: + dev-proxy --config [options] + +Options: + --config, -c Path to a dev proxy config file. Defaults to dev-proxy.config.ts. + --env-file Load environment variables before evaluating the config file. + --host Override the configured host. + --port Override the configured port. 
+ --help, -h Show this help message.`) +} + +async function flushStandardStreams() { + await Promise.all([ + new Promise(resolve => process.stdout.write('', () => resolve())), + new Promise(resolve => process.stderr.write('', () => resolve())), + ]) +} + +async function main() { + const cliOptions = parseDevProxyCliArgs(process.argv.slice(2)) + + if (cliOptions.help) { + printUsage() + return + } + + const config = await loadDevProxyConfig(cliOptions.config, process.cwd(), { + envFile: cliOptions.envFile, + }) + const { host, port } = resolveDevProxyServerOptions(config.server, cliOptions) + const app = createDevProxyApp(config) + + serve({ + fetch: app.fetch, + hostname: host, + port, + }) + + console.log(`[dev-proxy] listening on http://${host}:${port}`) +} + +try { + await main() + await flushStandardStreams() +} +catch (error) { + console.error(error instanceof Error ? error.message : error) + await flushStandardStreams() + process.exit(1) +} diff --git a/packages/dev-proxy/src/config.spec.ts b/packages/dev-proxy/src/config.spec.ts new file mode 100644 index 0000000000..6f681bcbae --- /dev/null +++ b/packages/dev-proxy/src/config.spec.ts @@ -0,0 +1,145 @@ +/** + * @vitest-environment node + */ +import fs from 'node:fs/promises' +import os from 'node:os' +import path from 'node:path' +import { afterEach, describe, expect, it } from 'vitest' +import { loadDevProxyConfig, parseDevProxyCliArgs, resolveDevProxyServerOptions } from './config' + +const tempDirs: string[] = [] + +const createTempDir = async () => { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'dev-proxy-test-')) + tempDirs.push(tempDir) + return tempDir +} + +describe('dev proxy config', () => { + afterEach(async () => { + delete process.env.DEV_PROXY_TEST_PORT + delete process.env.DEV_PROXY_TEST_TARGET + + await Promise.all(tempDirs.splice(0).map(tempDir => fs.rm(tempDir, { + force: true, + recursive: true, + }))) + }) + + // Scenario: CLI options should support both inline and separated 
values. + it('should parse proxy CLI options', () => { + // Act + const options = parseDevProxyCliArgs([ + '--config=./dev-proxy.config.ts', + '--env-file', + './.env.proxy', + '--host', + '0.0.0.0', + '--port', + '8083', + ]) + + // Assert + expect(options).toEqual({ + config: './dev-proxy.config.ts', + envFile: './.env.proxy', + host: '0.0.0.0', + port: '8083', + }) + }) + + // Scenario: removed target shortcuts should fail instead of silently doing the wrong thing. + it('should reject unsupported target shortcuts', () => { + // Assert + expect(() => parseDevProxyCliArgs(['--target', 'enterprise'])).toThrow('Unsupported dev proxy option') + }) + + // Scenario: package manager argument separators should not be treated as proxy options. + it('should ignore package manager argument separators', () => { + // Act + const options = parseDevProxyCliArgs(['--config', './dev-proxy.config.ts', '--', '--help']) + + // Assert + expect(options).toEqual({ + config: './dev-proxy.config.ts', + help: true, + }) + }) + + // Scenario: CLI host and port should override config defaults. + it('should resolve server options with CLI overrides', () => { + // Act + const options = resolveDevProxyServerOptions({ + host: '127.0.0.1', + port: 5001, + }, { + host: '0.0.0.0', + port: '9002', + }) + + // Assert + expect(options).toEqual({ + host: '0.0.0.0', + port: 9002, + }) + }) + + // Scenario: TS config files should load through c12. 
+ it('should load a TypeScript config file', async () => { + // Arrange + const tempDir = await createTempDir() + await fs.writeFile(path.join(tempDir, 'dev-proxy.config.ts'), ` + export default { + server: { host: '127.0.0.1', port: 7777 }, + routes: [{ paths: ['/api', '/files'], target: 'https://api.example.com' }], + } + `) + + // Act + const config = await loadDevProxyConfig('dev-proxy.config.ts', tempDir) + + // Assert + expect(config.server).toEqual({ + host: '127.0.0.1', + port: 7777, + }) + expect(config.routes).toEqual([ + { + paths: ['/api', '/files'], + target: 'https://api.example.com', + }, + ]) + }) + + // Scenario: env files should be loaded before the TypeScript config is evaluated. + it('should load a TypeScript config file with env file values', async () => { + // Arrange + const tempDir = await createTempDir() + await fs.writeFile(path.join(tempDir, '.env.proxy'), [ + 'DEV_PROXY_TEST_PORT=7788', + 'DEV_PROXY_TEST_TARGET=https://env.example.com', + ].join('\n')) + await fs.writeFile(path.join(tempDir, 'dev-proxy.config.ts'), ` + export default { + server: { port: Number(process.env.DEV_PROXY_TEST_PORT) }, + routes: [{ paths: '/api', target: process.env.DEV_PROXY_TEST_TARGET }], + } + `) + + // Act + const config = await loadDevProxyConfig('dev-proxy.config.ts', tempDir, { + envFile: '.env.proxy', + }) + + // Assert + expect(config.server).toEqual({ + port: 7788, + }) + expect(config.routes).toEqual([ + { + paths: '/api', + target: 'https://env.example.com', + }, + ]) + }) +}) diff --git a/packages/dev-proxy/src/config.ts b/packages/dev-proxy/src/config.ts new file mode 100644 index 0000000000..b23cb0a152 --- /dev/null +++ b/packages/dev-proxy/src/config.ts @@ -0,0 +1,129 @@ +import type { DotenvOptions } from 'c12' +import type { DevProxyCliOptions, DevProxyConfig, DevProxyConfigLoadOptions, DevProxyServerConfig, ResolvedDevProxyServerOptions } from './types' +import path from 'node:path' +import { loadConfig } from 'c12' + +const 
DEFAULT_CONFIG_FILE = 'dev-proxy.config.ts' +const DEFAULT_PROXY_HOST = '127.0.0.1' +const DEFAULT_PROXY_PORT = 5001 + +const OPTION_NAME_TO_KEY = { + '--config': 'config', + '-c': 'config', + '--env-file': 'envFile', + '--host': 'host', + '--port': 'port', +} as const + +type OptionName = keyof typeof OPTION_NAME_TO_KEY + +const isOptionName = (value: string): value is OptionName => value in OPTION_NAME_TO_KEY + +const requireOptionValue = (name: string, value?: string) => { + if (!value || value.startsWith('-')) + throw new Error(`Missing value for ${name}.`) + + return value +} + +export const parseDevProxyCliArgs = (argv: readonly string[]): DevProxyCliOptions => { + const options: DevProxyCliOptions = {} + + for (let index = 0; index < argv.length; index += 1) { + const arg = argv[index]! + + if (arg === '--') + continue + + if (arg === '--help' || arg === '-h') { + options.help = true + continue + } + + const [rawName, inlineValue] = arg.split('=', 2) + const name = rawName ?? '' + + if (!name.startsWith('-')) + continue + + if (!isOptionName(name)) + throw new Error(`Unsupported dev proxy option "${name}".`) + + const key = OPTION_NAME_TO_KEY[name] + options[key] = inlineValue ?? requireOptionValue(name, argv[index + 1]) + + if (inlineValue === undefined) + index += 1 + } + + return options +} + +const resolvePort = (rawPort: string | number) => { + const port = Number(rawPort) + if (!Number.isInteger(port) || port < 1 || port > 65535) + throw new Error(`Invalid proxy port "${rawPort}". Expected an integer between 1 and 65535.`) + + return port +} + +export const resolveDevProxyServerOptions = ( + serverConfig: DevProxyServerConfig = {}, + cliOptions: DevProxyCliOptions = {}, +): ResolvedDevProxyServerOptions => { + const configuredPort = cliOptions.port ?? serverConfig.port ?? 
DEFAULT_PROXY_PORT + + return { + host: cliOptions.host || serverConfig.host || DEFAULT_PROXY_HOST, + port: resolvePort(configuredPort), + } +} + +const isRecord = (value: unknown): value is Record => + typeof value === 'object' && value !== null + +export function assertDevProxyConfig(config: unknown): asserts config is DevProxyConfig { + if (!isRecord(config)) + throw new Error('Dev proxy config must export an object.') + + if (!Array.isArray(config.routes)) + throw new Error('Dev proxy config must include a routes array.') +} + +const resolveDotenvOptions = ( + envFile: DevProxyConfigLoadOptions['envFile'], + cwd: string, +): DotenvOptions | false => { + if (!envFile) + return false + + const resolvedEnvFilePath = path.resolve(cwd, envFile) + return { + cwd: path.dirname(resolvedEnvFilePath), + fileName: path.basename(resolvedEnvFilePath), + interpolate: true, + } +} + +export const loadDevProxyConfig = async ( + configPath = DEFAULT_CONFIG_FILE, + cwd = process.cwd(), + options: DevProxyConfigLoadOptions = {}, +): Promise => { + const resolvedConfigPath = path.resolve(cwd, configPath) + const parsedPath = path.parse(resolvedConfigPath) + const { config: loadedConfig } = await loadConfig({ + configFile: parsedPath.name, + cwd: parsedPath.dir, + dotenv: resolveDotenvOptions(options.envFile, cwd), + envName: false, + globalRc: false, + packageJson: false, + rcFile: false, + }) + + assertDevProxyConfig(loadedConfig) + return loadedConfig +} + +export const defineDevProxyConfig = (config: DevProxyConfig) => config diff --git a/packages/dev-proxy/src/cookies.spec.ts b/packages/dev-proxy/src/cookies.spec.ts new file mode 100644 index 0000000000..4a1b614eeb --- /dev/null +++ b/packages/dev-proxy/src/cookies.spec.ts @@ -0,0 +1,44 @@ +/** + * @vitest-environment node + */ +import { describe, expect, it } from 'vitest' +import { rewriteCookieHeaderForUpstream, rewriteSetCookieHeadersForLocal } from './cookies' + +describe('dev proxy cookies', () => { + // Scenario: cookie 
names should only receive secure host prefixes when configured. + it('should rewrite configured cookie names for HTTPS upstream requests', () => { + // Act + const cookieHeader = rewriteCookieHeaderForUpstream('access_token=abc; theme=dark; passport-app=def', { + hostPrefixCookies: ['access_token', /^passport-/], + useHostPrefix: true, + }) + + // Assert + expect(cookieHeader).toBe('__Host-access_token=abc; theme=dark; __Host-passport-app=def') + }) + + // Scenario: HTTP upstreams should keep local cookie names even when rewrite config exists. + it('should keep local cookie names for HTTP upstream requests', () => { + // Act + const cookieHeader = rewriteCookieHeaderForUpstream('access_token=abc; refresh_token=def', { + hostPrefixCookies: ['access_token', 'refresh_token'], + useHostPrefix: false, + }) + + // Assert + expect(cookieHeader).toBe('access_token=abc; refresh_token=def') + }) + + // Scenario: upstream set-cookie headers should be converted into localhost-safe cookies. + it('should rewrite upstream set-cookie headers for local development', () => { + // Act + const cookies = rewriteSetCookieHeadersForLocal([ + '__Host-access_token=abc; Path=/console/api; Domain=cloud.example.com; Secure; SameSite=None; Partitioned', + ]) + + // Assert + expect(cookies).toEqual([ + 'access_token=abc; Path=/; SameSite=Lax', + ]) + }) +}) diff --git a/web/plugins/dev-proxy/cookies.ts b/packages/dev-proxy/src/cookies.ts similarity index 58% rename from web/plugins/dev-proxy/cookies.ts rename to packages/dev-proxy/src/cookies.ts index c606322e96..61fdb6abd4 100644 --- a/web/plugins/dev-proxy/cookies.ts +++ b/packages/dev-proxy/src/cookies.ts @@ -1,4 +1,4 @@ -const DEFAULT_PROXY_TARGET = 'https://cloud.dify.ai' +import type { CookieRewriteOptions } from './types' const SECURE_COOKIE_PREFIX_PATTERN = /^__(Host|Secure)-/ const SAME_SITE_NONE_PATTERN = /^samesite=none$/i @@ -7,39 +7,43 @@ const COOKIE_DOMAIN_PATTERN = /^domain=/i const COOKIE_SECURE_PATTERN = /^secure$/i const 
COOKIE_PARTITIONED_PATTERN = /^partitioned$/i -const HOST_PREFIX_COOKIE_NAMES = new Set([ - 'access_token', - 'csrf_token', - 'refresh_token', - 'webapp_access_token', -]) +const stripSecureCookiePrefix = (cookieName: string) => cookieName.replace(SECURE_COOKIE_PREFIX_PATTERN, '') -const isPassportCookie = (cookieName: string) => cookieName.startsWith('passport-') +const matchesCookieName = (cookieName: string, matcher: string | RegExp) => + typeof matcher === 'string' + ? matcher === cookieName + : matcher.test(cookieName) -const shouldUseHostPrefix = (cookieName: string) => { - const normalizedCookieName = cookieName.replace(SECURE_COOKIE_PREFIX_PATTERN, '') - return HOST_PREFIX_COOKIE_NAMES.has(normalizedCookieName) || isPassportCookie(normalizedCookieName) +const shouldUseHostPrefix = (cookieName: string, options: CookieRewriteOptions) => { + const normalizedCookieName = stripSecureCookiePrefix(cookieName) + + return options.hostPrefixCookies?.some(matcher => matchesCookieName(normalizedCookieName, matcher)) || false } -const toUpstreamCookieName = (cookieName: string) => { +const toUpstreamCookieName = (cookieName: string, options: CookieRewriteOptions) => { if (cookieName.startsWith('__Host-')) return cookieName if (cookieName.startsWith('__Secure-')) - return `__Host-${cookieName.replace(SECURE_COOKIE_PREFIX_PATTERN, '')}` + return `__Host-${stripSecureCookiePrefix(cookieName)}` - if (!shouldUseHostPrefix(cookieName)) + if (!shouldUseHostPrefix(cookieName, options)) return cookieName return `__Host-${cookieName}` } -const toLocalCookieName = (cookieName: string) => cookieName.replace(SECURE_COOKIE_PREFIX_PATTERN, '') +export const toLocalCookieName = (cookieName: string) => stripSecureCookiePrefix(cookieName) -export const rewriteCookieHeaderForUpstream = (cookieHeader?: string) => { +export const rewriteCookieHeaderForUpstream = ( + cookieHeader: string | undefined, + options: CookieRewriteOptions & { useHostPrefix?: boolean }, +) => { if (!cookieHeader) 
return cookieHeader + const { useHostPrefix = true } = options + return cookieHeader .split(/;\s*/) .filter(Boolean) @@ -50,7 +54,11 @@ export const rewriteCookieHeaderForUpstream = (cookieHeader?: string) => { const cookieName = cookie.slice(0, separatorIndex).trim() const cookieValue = cookie.slice(separatorIndex + 1) - return `${toUpstreamCookieName(cookieName)}=${cookieValue}` + const upstreamCookieName = useHostPrefix + ? toUpstreamCookieName(cookieName, options) + : cookieName + + return `${upstreamCookieName}=${cookieValue}` }) .join('; ') } @@ -84,15 +92,5 @@ const rewriteSetCookieValueForLocal = (setCookieValue: string) => { return [`${toLocalCookieName(cookieName)}=${cookieValue}`, ...rewrittenAttributes].join('; ') } -export const rewriteSetCookieHeadersForLocal = (setCookieHeaders?: string | string[]): string[] | undefined => { - if (!setCookieHeaders) - return undefined - - const normalizedHeaders = Array.isArray(setCookieHeaders) - ? setCookieHeaders - : [setCookieHeaders] - - return normalizedHeaders.map(rewriteSetCookieValueForLocal) -} - -export { DEFAULT_PROXY_TARGET } +export const rewriteSetCookieHeadersForLocal = (setCookieHeaders: readonly string[]) => + setCookieHeaders.map(rewriteSetCookieValueForLocal) diff --git a/packages/dev-proxy/src/index.ts b/packages/dev-proxy/src/index.ts new file mode 100644 index 0000000000..e35893b98f --- /dev/null +++ b/packages/dev-proxy/src/index.ts @@ -0,0 +1,22 @@ +export { + assertDevProxyConfig, + defineDevProxyConfig, + loadDevProxyConfig, + parseDevProxyCliArgs, + resolveDevProxyServerOptions, +} from './config' +export { rewriteCookieHeaderForUpstream, rewriteSetCookieHeadersForLocal, toLocalCookieName } from './cookies' +export { buildUpstreamUrl, createDevProxyApp, isAllowedDevOrigin, isAllowedLocalDevOrigin } from './server' +export type { + CookieNameMatcher, + CookieRewriteOptions, + CreateDevProxyAppOptions, + DevProxyCliOptions, + DevProxyConfig, + DevProxyConfigLoadOptions, + 
DevProxyCorsAllowedOrigins, + DevProxyCorsConfig, + DevProxyRoute, + DevProxyServerConfig, + ResolvedDevProxyServerOptions, +} from './types' diff --git a/packages/dev-proxy/src/server.spec.ts b/packages/dev-proxy/src/server.spec.ts new file mode 100644 index 0000000000..32c16a1807 --- /dev/null +++ b/packages/dev-proxy/src/server.spec.ts @@ -0,0 +1,242 @@ +/** + * @vitest-environment node + */ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { buildUpstreamUrl, createDevProxyApp, isAllowedDevOrigin } from './server' + +describe('dev proxy server', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + // Scenario: target paths should not be duplicated when the incoming route already includes them. + it('should preserve prefixed targets when building upstream URLs', () => { + // Act + const url = buildUpstreamUrl('https://api.example.com/console/api', '/console/api/apps', '?page=1') + + // Assert + expect(url.href).toBe('https://api.example.com/console/api/apps?page=1') + }) + + // Scenario: only localhost dev origins should be reflected for credentialed CORS by default. + it('should only allow local development origins by default', () => { + // Assert + expect(isAllowedDevOrigin('http://localhost:3000')).toBe(true) + expect(isAllowedDevOrigin('http://127.0.0.1:3000')).toBe(true) + expect(isAllowedDevOrigin('https://example.com')).toBe(false) + }) + + // Scenario: explicit CORS origins should support non-local development hosts. + it('should allow explicitly configured origins', () => { + // Assert + expect(isAllowedDevOrigin('https://app.example.com', ['https://app.example.com'])).toBe(true) + expect(isAllowedDevOrigin('https://other.example.com', ['https://app.example.com'])).toBe(false) + }) + + // Scenario: proxy requests should rewrite cookies and surface credentialed CORS headers when configured. 
+ it('should proxy api requests with configured local cookie rewriting', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok', { + status: 200, + headers: [ + ['content-encoding', 'br'], + ['content-length', '123'], + ['set-cookie', '__Host-access_token=abc; Path=/console/api; Domain=cloud.example.com; Secure; SameSite=None'], + ['transfer-encoding', 'chunked'], + ], + })) + const app = createDevProxyApp({ + routes: [ + { + paths: '/console/api', + target: 'https://cloud.example.com', + cookieRewrite: { + hostPrefixCookies: ['access_token'], + }, + }, + ], + fetchImpl, + }) + + // Act + const response = await app.request('http://127.0.0.1:5001/console/api/apps?page=1', { + headers: { + 'Origin': 'http://localhost:3000', + 'Cookie': 'access_token=abc; theme=dark', + 'Accept-Encoding': 'zstd, br, gzip', + }, + }) + + // Assert + expect(fetchImpl).toHaveBeenCalledTimes(1) + expect(fetchImpl).toHaveBeenCalledWith( + new URL('https://cloud.example.com/console/api/apps?page=1'), + expect.objectContaining({ + method: 'GET', + headers: expect.any(Headers), + }), + ) + + const requestHeaders = fetchImpl.mock.calls[0]?.[1]?.headers + if (!(requestHeaders instanceof Headers)) + throw new Error('Expected proxy request headers to be Headers') + + expect(requestHeaders.get('cookie')).toBe('__Host-access_token=abc; theme=dark') + expect(requestHeaders.get('origin')).toBe('https://cloud.example.com') + expect(requestHeaders.get('accept-encoding')).toBe('identity') + expect(response.headers.get('access-control-allow-origin')).toBe('http://localhost:3000') + expect(response.headers.get('access-control-allow-credentials')).toBe('true') + expect(response.headers.get('content-encoding')).toBeNull() + expect(response.headers.get('content-length')).toBeNull() + expect(response.headers.get('transfer-encoding')).toBeNull() + expect(response.headers.getSetCookie()).toEqual([ + 'access_token=abc; Path=/; SameSite=Lax', + ]) + }) + + // Scenario: generic 
proxy routes should not know Dify cookie names by default. + it('should not rewrite cookie names when cookie rewriting is not configured', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok')) + const app = createDevProxyApp({ + routes: [ + { + paths: '/api', + target: 'https://api.example.com', + }, + ], + fetchImpl, + }) + + // Act + await app.request('http://127.0.0.1:5001/api/messages', { + headers: { + Cookie: 'access_token=abc; refresh_token=def', + }, + }) + + // Assert + const requestHeaders = fetchImpl.mock.calls[0]?.[1]?.headers + if (!(requestHeaders instanceof Headers)) + throw new Error('Expected proxy request headers to be Headers') + + expect(requestHeaders.get('cookie')).toBe('access_token=abc; refresh_token=def') + }) + + // Scenario: local HTTP upstreams expect local cookie names even when cookie rewriting is configured. + it('should keep local cookie names for HTTP upstream targets', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok')) + const app = createDevProxyApp({ + routes: [ + { + paths: '/console/api', + target: 'http://127.0.0.1:5001', + cookieRewrite: { + hostPrefixCookies: ['access_token', 'refresh_token'], + }, + }, + ], + fetchImpl, + }) + + // Act + await app.request('http://127.0.0.1:5010/console/api/account/profile', { + headers: { + Cookie: 'access_token=abc; refresh_token=def', + }, + }) + + // Assert + const requestHeaders = fetchImpl.mock.calls[0]?.[1]?.headers + if (!(requestHeaders instanceof Headers)) + throw new Error('Expected proxy request headers to be Headers') + + expect(requestHeaders.get('cookie')).toBe('access_token=abc; refresh_token=def') + }) + + // Scenario: custom route paths should support independent upstream targets. 
+ it('should proxy custom route paths to their configured targets', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok')) + const app = createDevProxyApp({ + routes: [ + { + paths: '/api', + target: 'https://api.example.com', + }, + { + paths: '/files', + target: 'https://files.example.com/assets', + }, + ], + fetchImpl, + }) + + // Act + await app.request('http://127.0.0.1:5001/api/messages') + await app.request('http://127.0.0.1:5001/files/logo.png?size=small') + + // Assert + expect(fetchImpl.mock.calls.map(([url]) => url.toString())).toEqual([ + 'https://api.example.com/api/messages', + 'https://files.example.com/assets/files/logo.png?size=small', + ]) + }) + + // Scenario: routes are matched in config order so callers can put specific routes first. + it('should prefer earlier route entries', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok')) + const app = createDevProxyApp({ + routes: [ + { + paths: '/api/enterprise', + target: 'https://enterprise.example.com', + }, + { + paths: '/api', + target: 'https://api.example.com', + }, + ], + fetchImpl, + }) + + // Act + await app.request('http://127.0.0.1:5001/api/enterprise/sso/login') + + // Assert + expect(fetchImpl.mock.calls.map(([url]) => url.toString())).toEqual([ + 'https://enterprise.example.com/api/enterprise/sso/login', + ]) + }) + + // Scenario: preflight requests should advertise allowed headers for credentialed cross-origin calls. 
+ it('should answer CORS preflight requests', async () => { + // Arrange + const app = createDevProxyApp({ + routes: [ + { + paths: '/api', + target: 'https://api.example.com', + }, + ], + fetchImpl: vi.fn(), + }) + + // Act + const response = await app.request('http://127.0.0.1:5001/api/messages', { + method: 'OPTIONS', + headers: { + 'Origin': 'http://localhost:3000', + 'Access-Control-Request-Headers': 'authorization,content-type,x-csrf-token', + }, + }) + + // Assert + expect(response.status).toBe(204) + expect(response.headers.get('access-control-allow-origin')).toBe('http://localhost:3000') + expect(response.headers.get('access-control-allow-credentials')).toBe('true') + expect(response.headers.get('access-control-allow-headers')).toBe('authorization,content-type,x-csrf-token') + }) +}) diff --git a/packages/dev-proxy/src/server.ts b/packages/dev-proxy/src/server.ts new file mode 100644 index 0000000000..79654750da --- /dev/null +++ b/packages/dev-proxy/src/server.ts @@ -0,0 +1,254 @@ +import type { Context, Hono } from 'hono' +import type { CookieRewriteOptions, CreateDevProxyAppOptions, DevProxyCorsAllowedOrigins, DevProxyRoute } from './types' +import { Hono as HonoApp } from 'hono' +import { rewriteCookieHeaderForUpstream, rewriteSetCookieHeadersForLocal } from './cookies' + +const LOCAL_DEV_HOSTS = new Set(['localhost', '127.0.0.1', '[::1]', '::1']) +const ALLOW_METHODS = 'GET,HEAD,POST,PUT,PATCH,DELETE,OPTIONS' +const DEFAULT_ALLOW_HEADERS = 'Authorization, Content-Type, X-CSRF-Token' +const UPSTREAM_ACCEPT_ENCODING = 'identity' +const RESPONSE_HEADERS_TO_DROP = [ + 'connection', + 'content-encoding', + 'content-length', + 'keep-alive', + 'proxy-authenticate', + 'proxy-authorization', + 'te', + 'trailer', + 'transfer-encoding', + 'upgrade', +] as const + +const appendHeaderValue = (headers: Headers, name: string, value: string) => { + const currentValue = headers.get(name) + if (!currentValue) { + headers.set(name, value) + return + } + + if 
(currentValue.split(',').map(item => item.trim()).includes(value)) + return + + headers.set(name, `${currentValue}, ${value}`) +} + +export const isAllowedLocalDevOrigin = (origin?: string | null) => { + if (!origin) + return false + + try { + const url = new URL(origin) + return LOCAL_DEV_HOSTS.has(url.hostname) + } + catch { + return false + } +} + +export const isAllowedDevOrigin = ( + origin?: string | null, + allowedOrigins: DevProxyCorsAllowedOrigins = 'local', +) => { + if (!origin) + return false + + if (allowedOrigins === 'local') + return isAllowedLocalDevOrigin(origin) + + return allowedOrigins.includes(origin) +} + +const applyCorsHeaders = ( + headers: Headers, + origin: string | undefined | null, + allowedOrigins: DevProxyCorsAllowedOrigins = 'local', +) => { + if (!isAllowedDevOrigin(origin, allowedOrigins)) + return + + headers.set('Access-Control-Allow-Origin', origin!) + headers.set('Access-Control-Allow-Credentials', 'true') + appendHeaderValue(headers, 'Vary', 'Origin') +} + +export const buildUpstreamUrl = (target: string, requestPath: string, search = '') => { + const targetUrl = new URL(target) + const normalizedTargetPath = targetUrl.pathname === '/' ? '' : targetUrl.pathname.replace(/\/$/, '') + const normalizedRequestPath = requestPath.startsWith('/') ? requestPath : `/${requestPath}` + const hasTargetPrefix = normalizedTargetPath + && (normalizedRequestPath === normalizedTargetPath || normalizedRequestPath.startsWith(`${normalizedTargetPath}/`)) + + targetUrl.pathname = hasTargetPrefix + ? 
normalizedRequestPath + : `${normalizedTargetPath}${normalizedRequestPath}` + targetUrl.search = search + + return targetUrl +} + +const createProxyRequestHeaders = ( + request: Request, + targetUrl: URL, + cookieRewrite: CookieRewriteOptions | false | undefined, +) => { + const headers = new Headers(request.headers) + headers.delete('host') + headers.set('accept-encoding', UPSTREAM_ACCEPT_ENCODING) + + if (headers.has('origin')) + headers.set('origin', targetUrl.origin) + + if (cookieRewrite) { + const rewrittenCookieHeader = rewriteCookieHeaderForUpstream(headers.get('cookie') || undefined, { + ...cookieRewrite, + useHostPrefix: targetUrl.protocol === 'https:', + }) + if (rewrittenCookieHeader) + headers.set('cookie', rewrittenCookieHeader) + } + + return headers +} + +const getSetCookieHeaders = (headers: Headers) => { + const headersWithGetSetCookie = headers as Headers & { getSetCookie?: () => string[] } + const setCookieHeaders = headersWithGetSetCookie.getSetCookie?.() + if (setCookieHeaders?.length) + return setCookieHeaders + + const setCookie = headers.get('set-cookie') + return setCookie ? [setCookie] : [] +} + +const createUpstreamResponseHeaders = ( + response: Response, + requestOrigin: string | undefined | null, + allowedOrigins: DevProxyCorsAllowedOrigins, + cookieRewrite: CookieRewriteOptions | false | undefined, +) => { + const headers = new Headers(response.headers) + RESPONSE_HEADERS_TO_DROP.forEach(header => headers.delete(header)) + headers.delete('set-cookie') + + const setCookieHeaders = getSetCookieHeaders(response.headers) + const responseSetCookieHeaders = cookieRewrite + ? 
rewriteSetCookieHeadersForLocal(setCookieHeaders) + : setCookieHeaders + + responseSetCookieHeaders.forEach((cookie) => { + headers.append('set-cookie', cookie) + }) + + applyCorsHeaders(headers, requestOrigin, allowedOrigins) + return headers +} + +const proxyRequest = async ( + context: Context, + route: DevProxyRoute, + fetchImpl: typeof globalThis.fetch, + allowedOrigins: DevProxyCorsAllowedOrigins, +) => { + const requestUrl = new URL(context.req.url) + const targetUrl = buildUpstreamUrl(route.target, requestUrl.pathname, requestUrl.search) + const requestHeaders = createProxyRequestHeaders(context.req.raw, targetUrl, route.cookieRewrite) + const requestInit: RequestInit & { duplex?: 'half' } = { + method: context.req.method, + headers: requestHeaders, + redirect: 'manual', + } + + if (context.req.method !== 'GET' && context.req.method !== 'HEAD') { + requestInit.body = context.req.raw.body + requestInit.duplex = 'half' + } + + const upstreamResponse = await fetchImpl(targetUrl, requestInit) + const responseHeaders = createUpstreamResponseHeaders( + upstreamResponse, + context.req.header('origin'), + allowedOrigins, + route.cookieRewrite, + ) + + return new Response(upstreamResponse.body, { + status: upstreamResponse.status, + statusText: upstreamResponse.statusText, + headers: responseHeaders, + }) +} + +const normalizeRoutePaths = (paths: DevProxyRoute['paths']) => Array.isArray(paths) ? paths : [paths] + +const registerProxyRoute = ( + app: Hono, + route: DevProxyRoute, + path: string, + fetchImpl: typeof globalThis.fetch, + allowedOrigins: DevProxyCorsAllowedOrigins, +) => { + if (!path.startsWith('/')) + throw new Error(`Invalid dev proxy route path "${path}". 
Paths must start with "/".`) + + app.all(path, context => proxyRequest(context, route, fetchImpl, allowedOrigins)) + app.all(`${path}/*`, context => proxyRequest(context, route, fetchImpl, allowedOrigins)) +} + +const registerProxyRoutes = ( + app: Hono, + routes: readonly DevProxyRoute[], + fetchImpl: typeof globalThis.fetch, + allowedOrigins: DevProxyCorsAllowedOrigins, +) => { + routes.forEach((route) => { + normalizeRoutePaths(route.paths).forEach((path) => { + registerProxyRoute(app, route, path, fetchImpl, allowedOrigins) + }) + }) +} + +export const createDevProxyApp = (options: CreateDevProxyAppOptions) => { + const app = new HonoApp() + const fetchImpl = options.fetchImpl || globalThis.fetch + const logger = options.logger || console + const allowedOrigins = options.cors?.allowedOrigins || 'local' + + app.onError((error, context) => { + logger.error('[dev-proxy]', error) + + const headers = new Headers() + applyCorsHeaders(headers, context.req.header('origin'), allowedOrigins) + + return new Response('Upstream proxy request failed.', { + status: 502, + headers, + }) + }) + + app.use('*', async (context, next) => { + if (context.req.method === 'OPTIONS') { + const headers = new Headers() + applyCorsHeaders(headers, context.req.header('origin'), allowedOrigins) + headers.set('Access-Control-Allow-Methods', ALLOW_METHODS) + headers.set( + 'Access-Control-Allow-Headers', + context.req.header('Access-Control-Request-Headers') || DEFAULT_ALLOW_HEADERS, + ) + if (context.req.header('Access-Control-Request-Private-Network') === 'true') + headers.set('Access-Control-Allow-Private-Network', 'true') + + return new Response(null, { + status: 204, + headers, + }) + } + + await next() + applyCorsHeaders(context.res.headers, context.req.header('origin'), allowedOrigins) + }) + + registerProxyRoutes(app, options.routes, fetchImpl, allowedOrigins) + + return app +} diff --git a/packages/dev-proxy/src/types.ts b/packages/dev-proxy/src/types.ts new file mode 100644 index 
0000000000..2c42b2f7fb --- /dev/null +++ b/packages/dev-proxy/src/types.ts @@ -0,0 +1,50 @@ +export type DevProxyServerConfig = { + host?: string + port?: number +} + +export type DevProxyCorsAllowedOrigins = 'local' | readonly string[] + +export type DevProxyCorsConfig = { + allowedOrigins?: DevProxyCorsAllowedOrigins +} + +export type CookieNameMatcher = string | RegExp + +export type CookieRewriteOptions = { + hostPrefixCookies?: readonly CookieNameMatcher[] +} + +export type DevProxyRoute = { + paths: string | readonly string[] + target: string + cookieRewrite?: CookieRewriteOptions | false +} + +export type DevProxyConfig = { + server?: DevProxyServerConfig + routes: readonly DevProxyRoute[] + cors?: DevProxyCorsConfig +} + +export type DevProxyCliOptions = { + config?: string + envFile?: string + host?: string + port?: string + help?: boolean +} + +export type DevProxyConfigLoadOptions = { + envFile?: string | false +} + +export type ResolvedDevProxyServerOptions = { + host: string + port: number +} + +export type CreateDevProxyAppOptions = Pick & { + fetchImpl?: typeof globalThis.fetch + logger?: Pick +} diff --git a/packages/dev-proxy/tsconfig.json b/packages/dev-proxy/tsconfig.json new file mode 100644 index 0000000000..813a9bd8a3 --- /dev/null +++ b/packages/dev-proxy/tsconfig.json @@ -0,0 +1,17 @@ +{ + "extends": "@dify/tsconfig/node.json", + "compilerOptions": { + "types": [ + "node", + "vitest/globals" + ] + }, + "include": [ + "src/**/*.ts", + "vite.config.ts" + ], + "exclude": [ + "node_modules", + "dist" + ] +} diff --git a/packages/dev-proxy/vite.config.ts b/packages/dev-proxy/vite.config.ts new file mode 100644 index 0000000000..d060ae036e --- /dev/null +++ b/packages/dev-proxy/vite.config.ts @@ -0,0 +1,27 @@ +import { defineConfig } from 'vite-plus' + +export default defineConfig({ + pack: { + clean: true, + deps: { + neverBundle: [ + '@hono/node-server', + 'c12', + 'hono', + ], + }, + entry: [ + 'src/index.ts', + 'src/cli.ts', + ], + format: 
['esm'], + outDir: 'dist', + platform: 'node', + sourcemap: true, + target: 'node22', + treeshake: true, + }, + test: { + environment: 'node', + }, +}) diff --git a/packages/dify-ui/AGENTS.md b/packages/dify-ui/AGENTS.md index d8a59b7a0b..bdc2160702 100644 --- a/packages/dify-ui/AGENTS.md +++ b/packages/dify-ui/AGENTS.md @@ -56,4 +56,28 @@ The Figma design system uses `--radius/*` tokens whose scale is **offset by one - When the Figma MCP returns `rounded-[var(--radius/sm, 6px)]`, convert it to the standard Tailwind class from the table above (e.g. `rounded-md`). - For values without a standard Tailwind equivalent (10px, 20px, 28px), use arbitrary values like `rounded-[10px]`. +## Search / Picker Primitive Selection: Autocomplete vs Combobox vs Select + +Pick by whether the user is entering free-form text, choosing a remembered value, or selecting from a closed list. + +Base UI decision rules: + +- [Autocomplete docs]: use `Combobox` instead of `Autocomplete` if the selection should be remembered and the input value cannot be custom. +- [Combobox docs]: do not use `Combobox` for simple search widgets that require unrestricted text entry; use `Autocomplete` instead. + +Apply this split in Dify UI: + +- `Autocomplete` — free-form text input with optional suggestions or completions. The input value may be custom and does not necessarily become a selected option. Use for search boxes, command-style suggestions, tag suggestions, and async text completion. +- `Combobox` — searchable picker whose value is one or more selected items from a collection. The chosen value is remembered by the root, and free-form text is not the final value. Use for model pickers, user pickers, dataset/document pickers, and multi-select chips. +- `Select` — closed-list picker without text entry. Use when the option set is small or already scannable and filtering is unnecessary. + +Composition rules: + +- Keep Base UI primitive semantics visible in the public API. 
Export compound parts such as `ComboboxInputGroup`, `ComboboxInput`, `ComboboxContent`, `ComboboxList`, `ComboboxItem`, and `ComboboxItemIndicator` instead of wrapping them into one business component. +- For `Combobox` multiple selection, follow the official chips pattern: `ComboboxInputGroup` contains `ComboboxChips`, `ComboboxValue` renders `ComboboxChip` items, and `ComboboxInput` remains inside the chips row. Chips should wrap and let the input group grow vertically instead of forcing horizontal overflow. +- Content primitives must own their Base UI `Portal` and use `z-1002` on `Positioner`, matching the overlay contract in `README.md`. +- Use `w-(--anchor-width)` with viewport-aware max-width for `Autocomplete` and `Combobox` popups. Do not add `min-w-(--anchor-width)` when it would defeat available-width clamping. + +[Autocomplete docs]: https://base-ui.com/react/components/autocomplete.md#usage-guidelines +[Combobox docs]: https://base-ui.com/react/components/combobox.md#usage-guidelines [docs]: https://base-ui.com/react/components/tooltip#infotips diff --git a/packages/dify-ui/README.md b/packages/dify-ui/README.md index cd24a0c078..2915fe5db7 100644 --- a/packages/dify-ui/README.md +++ b/packages/dify-ui/README.md @@ -28,6 +28,7 @@ Always import from a **subpath export** — there is no barrel: import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' import { Dialog, DialogContent, DialogTrigger } from '@langgenius/dify-ui/dialog' +import { Drawer, DrawerPopup, DrawerTrigger } from '@langgenius/dify-ui/drawer' import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import '@langgenius/dify-ui/styles.css' // once, in the app root ``` @@ -36,12 +37,12 @@ Importing from `@langgenius/dify-ui` (no subpath) is intentionally not supported ## Primitives -| Category | Subpath | Notes | -| -------- | 
------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------- | -| Overlay | `./alert-dialog`, `./context-menu`, `./dialog`, `./dropdown-menu`, `./popover`, `./select`, `./toast`, `./tooltip` | Portalled. See [Overlay & portal contract] below. | -| Form | `./number-field`, `./slider`, `./switch` | Controlled / uncontrolled per Base UI defaults. | -| Layout | `./scroll-area` | Custom-styled scrollbar over the host viewport. | -| Media | `./avatar`, `./button` | Button exposes `cva` variants. | +| Category | Subpath | Notes | +| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| Overlay | `./alert-dialog`, `./autocomplete`, `./combobox`, `./context-menu`, `./dialog`, `./drawer`, `./dropdown-menu`, `./popover`, `./select`, `./toast`, `./tooltip` | Portalled. See [Overlay & portal contract] below. | +| Form | `./autocomplete`, `./combobox`, `./number-field`, `./slider`, `./switch` | Controlled / uncontrolled per Base UI defaults. | +| Layout | `./scroll-area` | Custom-styled scrollbar over the host viewport. | +| Media | `./avatar`, `./button` | Button exposes `cva` variants. | Utilities: @@ -65,7 +66,7 @@ If a consumer uses Dify UI source files through the workspace, add an explicit s ## Overlay & portal contract -All overlay primitives (`dialog`, `alert-dialog`, `popover`, `dropdown-menu`, `context-menu`, `select`, `tooltip`, `toast`) render their content inside a [Base UI Portal] attached to `document.body`. This is the Base UI default — see the upstream [Portals][Base UI Portal] docs for the underlying behavior. Consumers **do not** need to wrap anything in a portal manually. +Overlay primitives render their floating surfaces inside a [Base UI Portal] attached to `document.body`. 
This is the Base UI default — see the upstream [Portals][Base UI Portal] docs for the underlying behavior. Convenience content components such as `DialogContent`, `PopoverContent`, and `SelectContent` own their portal internally; primitives with explicit portal anatomy such as `Drawer` expose the matching `DrawerPortal` part so consumers can compose the full Base UI structure. ### Root isolation requirement @@ -83,19 +84,19 @@ Equivalent: any root element with `isolation: isolate` in CSS. Without it, overl Every overlay primitive uses a single, shared z-index. Do **not** override it at call sites. -| Layer | z-index | Where | -| ----------------------------------------------------------------------------------- | -------- | -------------------------------------------------------------------------- | -| Overlays (Dialog, AlertDialog, Popover, DropdownMenu, ContextMenu, Select, Tooltip) | `z-1002` | Positioner / Backdrop | -| Toast viewport | `z-1003` | One layer above overlays so notifications are never hidden under a dialog. | +| Layer | z-index | Where | +| ------------------------------------------------------------------------------------------------------------------- | -------- | -------------------------------------------------------------------------- | +| Overlays (Dialog, AlertDialog, Autocomplete, Combobox, Drawer, Popover, DropdownMenu, ContextMenu, Select, Tooltip) | `z-1002` | Positioner / Backdrop | +| Toast viewport | `z-1003` | One layer above overlays so notifications are never hidden under a dialog. | -Rationale: during Dify's migration from legacy `portal-to-follow-elem` / `base/modal` / `base/dialog` overlays to this package, new and old overlays coexist in the DOM. `z-1002` sits above any common legacy layer, eliminating per-call-site z-index hacks. Among themselves, new primitives share the same z-index and **rely on DOM order** for stacking — the portal mounted later wins. 
+Rationale: during Dify's migration from legacy `base/modal` / `base/dialog` / `base/drawer` / `base/drawer-plus` overlays to this package, new and old overlays coexist in the DOM. `z-1002` sits above any common legacy layer, eliminating per-call-site z-index hacks. Among themselves, new primitives share the same z-index and **rely on DOM order** for stacking — the portal mounted later wins. -See `[web/docs/overlay-migration.md](../../web/docs/overlay-migration.md)` for the Dify-web migration history and the remaining legacy allowlist. Once the legacy overlays are gone, the values in this table can drop back to `z-50` / `z-51`. +See `[web/docs/overlay-migration.md](../../web/docs/overlay-migration.md)` for the Dify-web migration history. Once the legacy overlays are gone, the values in this table can drop back to `z-50` / `z-51`. ### Rules - Never add `z-1003` / `z-9999` / etc. overrides on primitives from this package. If something is getting clipped, the **parent** overlay (typically a legacy one) is the problem and should be migrated. -- Never portal an overlay manually on top of our primitives — use `DialogTrigger`, `PopoverTrigger`, etc. Base UI handles focus management, scroll-locking, and dismissal. +- Never create an extra manual portal on top of our primitives — use the exported content / portal parts such as `DialogContent`, `PopoverContent`, and `DrawerPortal`. Base UI handles focus management, scroll-locking, and dismissal. - When a primitive needs additional presentation chrome (e.g. a custom backdrop), add it **inside** the exported component, not at call sites. 
## Development diff --git a/packages/dify-ui/package.json b/packages/dify-ui/package.json index 73c6c0bd22..894e92bfd6 100644 --- a/packages/dify-ui/package.json +++ b/packages/dify-ui/package.json @@ -13,6 +13,10 @@ "types": "./src/alert-dialog/index.tsx", "import": "./src/alert-dialog/index.tsx" }, + "./autocomplete": { + "types": "./src/autocomplete/index.tsx", + "import": "./src/autocomplete/index.tsx" + }, "./avatar": { "types": "./src/avatar/index.tsx", "import": "./src/avatar/index.tsx" @@ -21,6 +25,10 @@ "types": "./src/button/index.tsx", "import": "./src/button/index.tsx" }, + "./combobox": { + "types": "./src/combobox/index.tsx", + "import": "./src/combobox/index.tsx" + }, "./context-menu": { "types": "./src/context-menu/index.tsx", "import": "./src/context-menu/index.tsx" @@ -29,6 +37,10 @@ "types": "./src/dialog/index.tsx", "import": "./src/dialog/index.tsx" }, + "./drawer": { + "types": "./src/drawer/index.tsx", + "import": "./src/drawer/index.tsx" + }, "./dropdown-menu": { "types": "./src/dropdown-menu/index.tsx", "import": "./src/dropdown-menu/index.tsx" @@ -103,6 +115,7 @@ "@storybook/addon-themes": "catalog:", "@storybook/react-vite": "catalog:", "@tailwindcss/vite": "catalog:", + "@tanstack/react-virtual": "catalog:", "@types/react": "catalog:", "@types/react-dom": "catalog:", "@typescript/native-preview": "catalog:", diff --git a/packages/dify-ui/src/autocomplete/__tests__/index.spec.tsx b/packages/dify-ui/src/autocomplete/__tests__/index.spec.tsx new file mode 100644 index 0000000000..a7031c5b12 --- /dev/null +++ b/packages/dify-ui/src/autocomplete/__tests__/index.spec.tsx @@ -0,0 +1,252 @@ +import type { ReactNode } from 'react' +import { render } from 'vitest-browser-react' +import { + Autocomplete, + AutocompleteClear, + AutocompleteContent, + AutocompleteEmpty, + AutocompleteGroup, + AutocompleteInput, + AutocompleteInputGroup, + AutocompleteItem, + AutocompleteItemIndicator, + AutocompleteItemText, + AutocompleteLabel, + AutocompleteList, + 
AutocompleteSeparator, + AutocompleteStatus, + AutocompleteTrigger, +} from '../index' + +const renderWithSafeViewport = (ui: ReactNode) => render( +
+ {ui} +
, +) + +const asHTMLElement = (element: HTMLElement | SVGElement) => element as HTMLElement + +const renderAutocomplete = ({ + children, + open = false, + defaultValue = 'workflow', +}: { + children?: ReactNode + open?: boolean + defaultValue?: string +} = {}) => renderWithSafeViewport( + + {children ?? ( + <> + + + + + + + 2 suggestions + + + Workflow + + + + Dataset + + + No suggestions + + + )} + , +) + +describe('Autocomplete wrappers', () => { + describe('Input group and input', () => { + it('should apply medium input group and input classes by default', async () => { + const screen = await renderAutocomplete() + + await expect.element(screen.getByTestId('input-group')).toHaveClass('rounded-lg') + await expect.element(screen.getByRole('combobox', { name: 'Search suggestions' })).toHaveClass('px-3') + await expect.element(screen.getByRole('combobox', { name: 'Search suggestions' })).toHaveClass('system-sm-regular') + }) + + it('should apply large input group and input classes when large size is provided', async () => { + const screen = await renderAutocomplete({ + children: ( + + + + ), + }) + + await expect.element(screen.getByTestId('input-group')).toHaveClass('rounded-[10px]') + await expect.element(screen.getByRole('combobox', { name: 'Search suggestions' })).toHaveClass('px-4') + await expect.element(screen.getByRole('combobox', { name: 'Search suggestions' })).toHaveClass('system-md-regular') + }) + + it('should set input defaults and forward passthrough props', async () => { + const screen = await renderAutocomplete({ + children: ( + + + + ), + }) + + await expect.element(screen.getByRole('combobox', { name: 'Search suggestions' })).toHaveAttribute('autocomplete', 'off') + await expect.element(screen.getByRole('combobox', { name: 'Search suggestions' })).toHaveAttribute('type', 'text') + await expect.element(screen.getByRole('combobox', { name: 'Search suggestions' })).toHaveAttribute('placeholder', 'Find a resource') + await 
expect.element(screen.getByRole('combobox', { name: 'Search suggestions' })).toBeRequired() + await expect.element(screen.getByRole('combobox', { name: 'Search suggestions' })).toHaveClass('custom-input') + }) + }) + + describe('Controls', () => { + it('should provide fallback aria labels and decorative icons when labels are omitted', async () => { + const screen = await renderAutocomplete() + + await expect.element(screen.getByRole('button', { name: 'Clear autocomplete' })).toHaveAttribute('type', 'button') + await expect.element(screen.getByRole('button', { name: 'Open autocomplete suggestions' })).toHaveAttribute('type', 'button') + expect(screen.getByRole('button', { name: 'Clear autocomplete' }).element().querySelector('.i-ri-close-line')).toHaveAttribute('aria-hidden', 'true') + expect(screen.getByRole('button', { name: 'Open autocomplete suggestions' }).element().querySelector('.i-ri-arrow-down-s-line')).toHaveAttribute('aria-hidden', 'true') + }) + + it('should preserve explicit labels and custom children', async () => { + const screen = await renderAutocomplete({ + children: ( + + + + reset + + + open + + + ), + }) + + expect(screen.getByRole('button', { name: 'Reset search' }).element()).toContainElement(screen.getByTestId('custom-clear').element()) + expect(screen.getByRole('button', { name: 'Show suggestions' }).element()).toContainElement(screen.getByTestId('custom-trigger').element()) + expect(screen.getByRole('button', { name: 'Reset search' }).element().querySelector('.i-ri-close-line')).not.toBeInTheDocument() + expect(screen.getByRole('button', { name: 'Show suggestions' }).element().querySelector('.i-ri-arrow-down-s-line')).not.toBeInTheDocument() + }) + + it('should rely on aria-labelledby when provided instead of injecting fallback labels', async () => { + const screen = await renderAutocomplete({ + children: ( + <> + Clear from label + Trigger from label + + + + + + + ), + }) + + await expect.element(screen.getByRole('button', { name: 'Clear 
from label' })).not.toHaveAttribute('aria-label') + await expect.element(screen.getByRole('button', { name: 'Trigger from label' })).not.toHaveAttribute('aria-label') + }) + }) + + describe('Content and options', () => { + it('should use default overlay placement and Dify popup classes', async () => { + const screen = await renderAutocomplete({ open: true }) + + await expect.element(screen.getByRole('group', { name: 'autocomplete positioner' })).toHaveAttribute('data-side', 'bottom') + await expect.element(screen.getByRole('group', { name: 'autocomplete positioner' })).toHaveAttribute('data-align', 'start') + await expect.element(screen.getByRole('group', { name: 'autocomplete positioner' })).toHaveClass('z-1002') + await expect.element(screen.getByRole('dialog', { name: 'autocomplete popup' })).toHaveClass('rounded-xl') + await expect.element(screen.getByRole('dialog', { name: 'autocomplete popup' })).toHaveClass('w-(--anchor-width)') + await expect.element(screen.getByRole('listbox', { name: 'autocomplete list' })).toHaveClass('scroll-py-1') + }) + + it('should apply custom placement side and passthrough popup props', async () => { + const onPopupClick = vi.fn() + const screen = await renderWithSafeViewport( + + + + + + + + Workflow + + + + , + ) + + asHTMLElement(screen.getByRole('dialog', { name: 'autocomplete popup' }).element()).click() + + await expect.element(screen.getByRole('group', { name: 'autocomplete positioner' })).toHaveAttribute('data-side', 'top') + expect(onPopupClick).toHaveBeenCalledTimes(1) + }) + + it('should render item text indicator status and empty wrappers with design classes', async () => { + const screen = await renderAutocomplete({ open: true }) + + await expect.element(screen.getByText('Workflow')).toHaveClass('system-sm-medium') + await expect.element(screen.getByTestId('status')).toHaveClass('text-text-tertiary') + await expect.element(screen.getByTestId('empty')).toHaveClass('system-sm-regular') + 
expect(screen.getByText('Workflow').element().parentElement?.querySelector('.i-ri-arrow-right-line')).toHaveAttribute('aria-hidden', 'true') + }) + + it('should forward custom classes to label separator item text and indicator', async () => { + const screen = await renderWithSafeViewport( + + + + + + + + Resources + + + Workflow + + + + + + , + ) + + await expect.element(screen.getByText('Resources')).toHaveClass('custom-label') + await expect.element(screen.getByTestId('separator')).toHaveClass('custom-separator') + await expect.element(screen.getByRole('option', { name: 'Workflow' })).toHaveClass('custom-item') + await expect.element(screen.getByText('Workflow')).toHaveClass('custom-text') + await expect.element(screen.getByTestId('indicator')).toHaveClass('custom-indicator') + }) + }) +}) diff --git a/packages/dify-ui/src/autocomplete/index.stories.tsx b/packages/dify-ui/src/autocomplete/index.stories.tsx new file mode 100644 index 0000000000..71c7c6607d --- /dev/null +++ b/packages/dify-ui/src/autocomplete/index.stories.tsx @@ -0,0 +1,721 @@ +import type { Meta, StoryObj } from '@storybook/react-vite' +import type { Virtualizer } from '@tanstack/react-virtual' +import type { RefObject } from 'react' +import { useVirtualizer } from '@tanstack/react-virtual' +import { useEffect, useMemo, useRef, useState } from 'react' +import { + Autocomplete, + AutocompleteClear, + AutocompleteCollection, + AutocompleteContent, + AutocompleteEmpty, + AutocompleteGroup, + AutocompleteInput, + AutocompleteInputGroup, + AutocompleteItem, + AutocompleteItemText, + AutocompleteLabel, + AutocompleteList, + AutocompleteSeparator, + AutocompleteStatus, + AutocompleteTrigger, + useAutocompleteFilter, + useAutocompleteFilteredItems, +} from '.' 
+import { cn } from '../cn' + +type Suggestion = { + value: string + label: string + description?: string + icon?: string + meta?: string +} + +type SuggestionGroup = { + label: string + items: Suggestion[] +} + +const inputWidth = 'w-80' + +type StoryVirtualizer = Virtualizer + +const scrollHighlightedVirtualItem = ( + item: unknown, + { + reason, + index, + }: { + reason: 'keyboard' | 'pointer' | 'none' + index: number + }, + virtualizer: StoryVirtualizer | null, +) => { + if (!item || !virtualizer) + return + + const isStart = index === 0 + const isEnd = index === virtualizer.options.count - 1 + const shouldScroll = reason === 'none' || (reason === 'keyboard' && (isStart || isEnd)) + + if (shouldScroll) { + queueMicrotask(() => { + virtualizer.scrollToIndex(index, { align: isEnd ? 'start' : 'end' }) + }) + } +} + +const tagSuggestions: Suggestion[] = [ + { value: 'feature', label: 'feature', description: 'Product work and launch notes' }, + { value: 'fix', label: 'fix', description: 'Bug fixes and regressions' }, + { value: 'docs', label: 'docs', description: 'Documentation updates' }, + { value: 'internal', label: 'internal', description: 'Workspace-only notes' }, + { value: 'mobile', label: 'mobile', description: 'Mobile app issues' }, + { value: 'component: autocomplete', label: 'component: autocomplete', description: 'Base UI primitive wrapper' }, + { value: 'component: combobox', label: 'component: combobox', description: 'Filterable predefined selection' }, + { value: 'component: select', label: 'component: select', description: 'Compact predefined selection' }, +] + +const promptCompletions: Suggestion[] = [ + { value: 'summarize this conversation', label: 'summarize this conversation' }, + { value: 'summarize this dataset with citations', label: 'summarize this dataset with citations' }, + { value: 'summarize this workflow run for an operator', label: 'summarize this workflow run for an operator' }, + { value: 'summarize this support ticket in 3 
bullets', label: 'summarize this support ticket in 3 bullets' }, +] + +const workflowSuggestions: Suggestion[] = [ + { value: 'http-request', label: 'HTTP Request', description: 'Call an external API', icon: 'i-ri-global-line', meta: 'Tool' }, + { value: 'knowledge-retrieval', label: 'Knowledge Retrieval', description: 'Search configured datasets', icon: 'i-ri-database-2-line', meta: 'Tool' }, + { value: 'code-execution', label: 'Code Execution', description: 'Run sandboxed snippets', icon: 'i-ri-code-s-slash-line', meta: 'Tool' }, + { value: 'template-transform', label: 'Template Transform', description: 'Compose variables into output', icon: 'i-ri-braces-line', meta: 'Tool' }, + { value: 'question-classifier', label: 'Question Classifier', description: 'Route by intent', icon: 'i-ri-git-branch-line', meta: 'Tool' }, + { value: 'parameter-extractor', label: 'Parameter Extractor', description: 'Extract typed values', icon: 'i-ri-list-check-3', meta: 'Tool' }, + { value: 'answer-node', label: 'Answer Node', description: 'Return a final assistant answer', icon: 'i-ri-message-3-line', meta: 'Node' }, + { value: 'iteration-node', label: 'Iteration Node', description: 'Run a loop over array items', icon: 'i-ri-repeat-line', meta: 'Node' }, + { value: 'variable-assigner', label: 'Variable Assigner', description: 'Persist intermediate state', icon: 'i-ri-pencil-ruler-2-line', meta: 'Node' }, +] + +const groupedSuggestions: SuggestionGroup[] = [ + { + label: 'Tags', + items: tagSuggestions.slice(0, 5), + }, + { + label: 'Workflow Suggestions', + items: workflowSuggestions.slice(0, 5), + }, + { + label: 'Prompt Starters', + items: promptCompletions.slice(0, 3), + }, +] + +const commandGroups: SuggestionGroup[] = [ + { + label: 'App', + items: [ + { value: '/run', label: 'Run workflow', description: 'Execute the current draft', icon: 'i-ri-play-circle-line' }, + { value: '/publish', label: 'Publish app', description: 'Ship the current configuration', icon: 
'i-ri-upload-cloud-2-line' }, + { value: '/trace', label: 'Open trace', description: 'Inspect the latest workflow run', icon: 'i-ri-route-line' }, + ], + }, + { + label: 'Workspace', + items: [ + { value: '/dataset', label: 'Search datasets', description: 'Find knowledge attached to this app', icon: 'i-ri-database-line' }, + { value: '/members', label: 'Invite members', description: 'Open workspace access settings', icon: 'i-ri-user-add-line' }, + { value: '/usage', label: 'View usage', description: 'Open model and workflow usage', icon: 'i-ri-bar-chart-line' }, + ], + }, +] + +const remoteSuggestions: Suggestion[] = [ + { value: 'agent-builder', label: 'Agent Builder', description: 'Workspace app' }, + { value: 'agent-observability', label: 'Agent Observability', description: 'Dataset' }, + { value: 'agent-routing-dataset', label: 'Agent Routing Dataset', description: 'Knowledge source' }, +] + +const virtualizedSuggestions: Suggestion[] = Array.from({ length: 1000 }, (_, index) => { + const family = ['workflow', 'dataset', 'prompt', 'tool'][index % 4]! + const number = new Intl.NumberFormat('en-US', { + minimumIntegerDigits: 4, + }).format(index + 1) + + return { + value: `${family}-${index + 1}`, + label: `${family} suggestion ${number}`, + description: `Free-form autocomplete result from ${family} search`, + icon: family === 'dataset' + ? 'i-ri-database-2-line' + : family === 'prompt' + ? 'i-ri-text-snippet' + : family === 'tool' + ? 
'i-ri-tools-line' + : 'i-ri-flow-chart', + meta: family, + } +}) + +const getSuggestionLabel = (item: Suggestion) => item.label + +const SuggestionItem = ({ + item, + index, + dense, +}: { + item: Suggestion + index?: number + dense?: boolean +}) => ( + + {item.icon && +) + +const TagSuggestionItem = ({ + item, + index, +}: { + item: Suggestion + index?: number +}) => ( + + {item.label} + {item.description && {item.description}} + +) + +const BasicTagAutocomplete = ({ + size = 'medium', +}: { + size?: 'small' | 'medium' | 'large' +}) => ( + + + + + + {(item: Suggestion, index: number) => ( + + )} + + No tag suggestion. Keep the typed value. + + +) + +const GroupedSuggestionList = () => { + const groups = useAutocompleteFilteredItems() + + return ( + + {groups.map((group, groupIndex) => ( + + {groupIndex > 0 && } + {group.label} + + {(item: Suggestion) => ( + + )} + + + ))} + + ) +} + +const CommandPaletteList = () => { + const groups = useAutocompleteFilteredItems() + + return ( + + {groups.map((group, groupIndex) => ( + + {groupIndex > 0 && } + {group.label} + + {(item: Suggestion) => ( + + + {item.icon && + + Enter + + + )} + + + ))} + + ) +} + +const LimitedStatus = ({ + total, +}: { + total: number +}) => { + const items = useAutocompleteFilteredItems() + const hidden = Math.max(0, total - items.length) + + return hidden > 0 + ? `${hidden} more suggestions hidden. Refine the query to narrow results.` + : `${items.length} suggestions available.` +} + +const AsyncSearchDemo = () => { + const [value, setValue] = useState('agent') + const [loading, setLoading] = useState(false) + const [items, setItems] = useState(remoteSuggestions) + + useEffect(() => { + setLoading(true) + const timeout = window.setTimeout(() => { + setItems( + value.trim() + ? 
remoteSuggestions.filter(item => item.label.toLowerCase().includes(value.trim().toLowerCase())) + : remoteSuggestions, + ) + setLoading(false) + }, 500) + + return () => window.clearTimeout(timeout) + }, [value]) + + return ( +
+ + + + + + {loading ? 'Loading suggestions…' : `${items.length} remote suggestions`} + + + {(item: Suggestion, index: number) => ( + + )} + + No remote suggestion. Keep the typed query. + + +
+ ) +} + +const VirtualizedSuggestionList = ({ + virtualizerRef, +}: { + virtualizerRef: RefObject +}) => { + const scrollRef = useRef(null) + const filteredItems = useAutocompleteFilteredItems() + const virtualizer = useVirtualizer({ + count: filteredItems.length, + getScrollElement: () => scrollRef.current, + estimateSize: () => 44, + overscan: 6, + }) + + useEffect(() => { + virtualizerRef.current = virtualizer + + return () => { + virtualizerRef.current = null + } + }, [virtualizer, virtualizerRef]) + + return ( +
+ + {virtualizer.getVirtualItems().map((virtualItem) => { + const item = filteredItems[virtualItem.index] + + if (!item) + return null + + return ( +
+ +
+ ) + })} +
+
+ ) +} + +const VirtualizedStatus = () => { + const filteredItems = useAutocompleteFilteredItems() + + return ( + + {filteredItems.length} + {' '} + matching suggestions. Selecting one only replaces the input text. + + ) +} + +const FuzzyHighlight = ({ + text, + query, +}: { + text: string + query: string +}) => { + const parts = useMemo(() => { + const trimmed = query.trim() + + if (!trimmed) + return [text] + + const escaped = trimmed.slice(0, 80).replace(/[.*+?^${}()|[\]\\]/g, '\\$&') + return text.split(new RegExp(`(${escaped})`, 'i')) + }, [query, text]) + + return ( + <> + {parts.map((part, index) => ( + part.toLowerCase() === query.trim().toLowerCase() + ? {part} + : part + ))} + + ) +} + +const FuzzyMatchingDemo = () => { + const [value, setValue] = useState('retr') + const { contains } = useAutocompleteFilter({ sensitivity: 'base' }) + + return ( +
+ + + + + + {(item: Suggestion, index: number) => ( + + {item.icon && + )} + + No workflow suggestion. Keep typing freely. + + +
+ ) +} + +const meta = { + title: 'Base/UI/Autocomplete', + component: Autocomplete, + parameters: { + layout: 'centered', + docs: { + description: { + component: 'Compound autocomplete built on Base UI Autocomplete. Use it for free-form inputs where suggestions can replace or complete the typed text, but selection is not persistent state.', + }, + }, + }, + tags: ['autodocs'], +} satisfies Meta + +export default meta +type Story = StoryObj + +export const SearchTags: Story = { + render: () => ( +
+ +
+ ), +} + +export const Sizes: Story = { + render: () => ( +
+ {(['small', 'medium', 'large'] as const).map(size => ( +
+ +
+ ))} +
+ ), +} + +export const InlineAutocomplete: Story = { + render: () => ( +
+ + + + + + {(item: Suggestion, index: number) => ( + + )} + + No inline completion. Continue typing freely. + + +
+ ), +} + +export const GroupedSuggestions: Story = { + render: () => ( +
+ + + + + + No suggestion. Use the text as entered. + + +
+ ), +} + +export const FuzzyMatching: Story = { + render: () => , +} + +export const LimitResults: Story = { + render: () => ( +
+ + + + + + + + + {(item: Suggestion, index: number) => ( + + )} + + No suggestion. Submit the typed text instead. + + +
+ ), +} + +export const CommandPalette: Story = { + render: () => ( +
+ + + + + +
+ ), +} + +const VirtualizedLongSuggestionsDemo = () => { + const virtualizerRef = useRef(null) + + return ( +
+ { + scrollHighlightedVirtualItem(item, details, virtualizerRef.current) + }} + > + + + + + + No suggestion. Free-form text is still valid. + + +
+ ) +} + +export const VirtualizedLongSuggestions: Story = { + render: () => , +} + +export const AsyncSearch: Story = { + render: () => , +} + +export const Empty: Story = { + render: () => ( +
+ + + + + + {(item: Suggestion, index: number) => ( + + )} + + No tag suggestion. The custom text remains valid. + + +
+ ), +} + +export const DisabledAndReadOnly: Story = { + render: () => ( +
+ + + + + + + + + {(item: Suggestion, index: number) => ( + + )} + + + + + + + + + + + + {(item: Suggestion, index: number) => ( + + )} + + + +
+ ), +} diff --git a/packages/dify-ui/src/autocomplete/index.tsx b/packages/dify-ui/src/autocomplete/index.tsx new file mode 100644 index 0000000000..16c4b19673 --- /dev/null +++ b/packages/dify-ui/src/autocomplete/index.tsx @@ -0,0 +1,381 @@ +'use client' + +import type { VariantProps } from 'class-variance-authority' +import type { HTMLAttributes, ReactNode } from 'react' +import type { Placement } from '../placement' +import { Autocomplete as BaseAutocomplete } from '@base-ui/react/autocomplete' +import { cva } from 'class-variance-authority' +import { cn } from '../cn' +import { + overlayIndicatorClassName, + overlayLabelClassName, + overlayPopupAnimationClassName, + overlaySeparatorClassName, +} from '../overlay-shared' +import { parsePlacement } from '../placement' + +export type { Placement } + +export const Autocomplete = BaseAutocomplete.Root +export const AutocompleteValue = BaseAutocomplete.Value +export const AutocompleteGroup = BaseAutocomplete.Group +export const AutocompleteCollection = BaseAutocomplete.Collection +export const AutocompleteRow = BaseAutocomplete.Row +export const useAutocompleteFilter = BaseAutocomplete.useFilter +export const useAutocompleteFilteredItems = BaseAutocomplete.useFilteredItems + +export type AutocompleteRootProps = BaseAutocomplete.Root.Props +export type AutocompleteRootChangeEventDetails = BaseAutocomplete.Root.ChangeEventDetails +export type AutocompleteRootHighlightEventDetails = BaseAutocomplete.Root.HighlightEventDetails + +const autocompletePopupClassName = [ + 'w-(--anchor-width) max-w-[min(28rem,var(--available-width))] overflow-hidden rounded-xl border-[0.5px] border-components-panel-border bg-components-panel-bg shadow-lg outline-hidden', + 'data-side-top:origin-bottom data-side-bottom:origin-top data-side-left:origin-right data-side-right:origin-left', +] + +const autocompleteListClassName = [ + 'max-h-[min(20rem,var(--available-height))] overflow-y-auto overflow-x-hidden overscroll-contain p-1 
outline-hidden scroll-py-1', + 'data-empty:max-h-none data-empty:p-0', +] + +const autocompleteItemClassName = [ + 'mx-1 flex min-h-8 cursor-pointer select-none items-center gap-2 rounded-lg px-2 py-1.5 text-text-secondary outline-hidden transition-colors', + 'hover:bg-state-base-hover-alt hover:text-text-primary', + 'data-highlighted:bg-state-base-hover data-highlighted:text-text-primary', + 'data-disabled:cursor-not-allowed data-disabled:opacity-30 data-disabled:hover:bg-transparent data-disabled:hover:text-text-secondary', + 'motion-reduce:transition-none', +] + +const autocompleteInputGroupVariants = cva( + [ + 'group/autocomplete flex w-full min-w-0 items-center border border-transparent bg-components-input-bg-normal text-components-input-text-filled shadow-none outline-hidden transition-[background-color,border-color,box-shadow]', + 'hover:border-components-input-border-hover hover:bg-components-input-bg-hover', + 'focus-within:border-components-input-border-active focus-within:bg-components-input-bg-active focus-within:shadow-xs', + 'data-focused:border-components-input-border-active data-focused:bg-components-input-bg-active data-focused:shadow-xs', + 'data-disabled:cursor-not-allowed data-disabled:border-transparent data-disabled:bg-components-input-bg-disabled data-disabled:text-components-input-text-filled-disabled', + 'data-disabled:hover:border-transparent data-disabled:hover:bg-components-input-bg-disabled', + 'data-readonly:shadow-none data-readonly:hover:border-transparent data-readonly:hover:bg-components-input-bg-normal', + 'motion-reduce:transition-none', + ], + { + variants: { + size: { + small: 'h-6 rounded-md', + medium: 'h-8 rounded-lg', + large: 'h-9 rounded-[10px]', + }, + }, + defaultVariants: { + size: 'medium', + }, + }, +) + +export type AutocompleteSize = NonNullable['size']> + +export type AutocompleteInputGroupProps + = BaseAutocomplete.InputGroup.Props + & VariantProps + +export function AutocompleteInputGroup({ + className, + size 
= 'medium', + ...props +}: AutocompleteInputGroupProps) { + return ( + + ) +} + +const autocompleteInputVariants = cva( + [ + 'w-0 min-w-0 flex-1 appearance-none border-0 bg-transparent text-components-input-text-filled caret-primary-600 outline-hidden', + 'placeholder:text-components-input-text-placeholder', + 'disabled:cursor-not-allowed disabled:text-components-input-text-filled-disabled disabled:placeholder:text-components-input-text-disabled', + 'data-readonly:cursor-default', + ], + { + variants: { + size: { + small: 'px-2 py-1 system-xs-regular', + medium: 'px-3 py-[7px] system-sm-regular', + large: 'px-4 py-2 system-md-regular', + }, + }, + defaultVariants: { + size: 'medium', + }, + }, +) + +export type AutocompleteInputProps + = Omit + & VariantProps + +export function AutocompleteInput({ + className, + size = 'medium', + type = 'text', + autoComplete = 'off', + ...props +}: AutocompleteInputProps) { + return ( + + ) +} + +const autocompleteControlVariants = cva( + [ + 'flex shrink-0 touch-manipulation items-center justify-center rounded-md text-text-tertiary outline-hidden transition-colors', + 'hover:bg-components-input-bg-hover hover:text-text-secondary focus-visible:bg-components-input-bg-hover focus-visible:text-text-secondary', + 'focus-visible:ring-1 focus-visible:ring-components-input-border-active focus-visible:ring-inset', + 'disabled:cursor-not-allowed disabled:hover:bg-transparent disabled:hover:text-text-tertiary disabled:focus-visible:bg-transparent disabled:focus-visible:ring-0', + 'group-data-disabled/autocomplete:cursor-not-allowed group-data-disabled/autocomplete:hover:bg-transparent group-data-disabled/autocomplete:focus-visible:bg-transparent group-data-disabled/autocomplete:focus-visible:ring-0', + 'group-data-readonly/autocomplete:hidden', + 'motion-reduce:transition-none', + ], + { + variants: { + size: { + small: 'mr-1 size-4', + medium: 'mr-1.5 size-5', + large: 'mr-2 size-5', + }, + }, + defaultVariants: { + size: 'medium', + }, 
+ }, +) + +export type AutocompleteControlProps + = Omit + & VariantProps + & { className?: string } + +export function AutocompleteTrigger({ + className, + children, + size = 'medium', + type = 'button', + ...props +}: AutocompleteControlProps) { + return ( + + {children ?? + ) +} + +export type AutocompleteClearProps + = Omit + & VariantProps + & { className?: string } + +export function AutocompleteClear({ + className, + children, + size = 'medium', + type = 'button', + ...props +}: AutocompleteClearProps) { + return ( + + {children ?? + ) +} + +export function AutocompleteIcon({ + className, + children, + ...props +}: BaseAutocomplete.Icon.Props) { + return ( + + {children ?? + ) +} + +type AutocompleteContentProps = { + children: ReactNode + placement?: Placement + sideOffset?: number + alignOffset?: number + className?: string + popupClassName?: string + portalProps?: Omit + positionerProps?: Omit< + BaseAutocomplete.Positioner.Props, + 'children' | 'className' | 'side' | 'align' | 'sideOffset' | 'alignOffset' + > + popupProps?: Omit< + BaseAutocomplete.Popup.Props, + 'children' | 'className' + > +} + +export function AutocompleteContent({ + children, + placement = 'bottom-start', + sideOffset = 4, + alignOffset = 0, + className, + popupClassName, + portalProps, + positionerProps, + popupProps, +}: AutocompleteContentProps) { + const { side, align } = parsePlacement(placement) + + return ( + + + + {children} + + + + ) +} + +export function AutocompleteList({ + className, + ...props +}: BaseAutocomplete.List.Props) { + return ( + + ) +} + +export function AutocompleteItem({ + className, + ...props +}: BaseAutocomplete.Item.Props) { + return ( + + ) +} + +export type AutocompleteItemTextProps = HTMLAttributes + +export function AutocompleteItemText({ + className, + ...props +}: AutocompleteItemTextProps) { + return ( + + ) +} + +export function AutocompleteLabel({ + className, + ...props +}: BaseAutocomplete.GroupLabel.Props) { + return ( + + ) +} + +export 
function AutocompleteSeparator({ + className, + ...props +}: BaseAutocomplete.Separator.Props) { + return ( + + ) +} + +export function AutocompleteEmpty({ + className, + ...props +}: BaseAutocomplete.Empty.Props) { + return ( + + ) +} + +export function AutocompleteStatus({ + className, + ...props +}: BaseAutocomplete.Status.Props) { + return ( + + ) +} + +export function AutocompleteItemIndicator({ + className, + children, + ...props +}: HTMLAttributes) { + return ( + + {children ?? + ) +} diff --git a/packages/dify-ui/src/combobox/__tests__/index.spec.tsx b/packages/dify-ui/src/combobox/__tests__/index.spec.tsx new file mode 100644 index 0000000000..705ebe9601 --- /dev/null +++ b/packages/dify-ui/src/combobox/__tests__/index.spec.tsx @@ -0,0 +1,363 @@ +import type { ReactNode } from 'react' +import { render } from 'vitest-browser-react' +import { + Combobox, + ComboboxChip, + ComboboxChipRemove, + ComboboxChips, + ComboboxClear, + ComboboxContent, + ComboboxEmpty, + ComboboxGroup, + ComboboxGroupLabel, + ComboboxInput, + ComboboxInputGroup, + ComboboxInputTrigger, + ComboboxItem, + ComboboxItemIndicator, + ComboboxItemText, + ComboboxLabel, + ComboboxList, + ComboboxSeparator, + ComboboxStatus, + ComboboxTrigger, + ComboboxValue, +} from '../index' + +const renderWithSafeViewport = (ui: ReactNode) => render( +
+ {ui} +
, +) + +const asHTMLElement = (element: HTMLElement | SVGElement) => element as HTMLElement + +const renderSelectLikeCombobox = ({ + children, + open = false, +}: { + children?: ReactNode + open?: boolean +} = {}) => renderWithSafeViewport( + + {children ?? ( + <> + Resource type + + + + + 2 options + + + Workflow + + + + Dataset + + + No options + + + )} + , +) + +const renderInputCombobox = ({ + children, + open = false, +}: { + children?: ReactNode + open?: boolean +} = {}) => renderWithSafeViewport( + + {children ?? ( + <> + + + + + + + + + Workflow + + + + + + )} + , +) + +describe('Combobox wrappers', () => { + describe('Select-like trigger', () => { + it('should render label and apply medium trigger classes by default', async () => { + const screen = await renderSelectLikeCombobox() + + await expect.element(screen.getByText('Resource type')).toHaveClass('system-sm-medium') + await expect.element(screen.getByRole('combobox', { name: 'Resource type' })).toHaveClass('rounded-lg') + await expect.element(screen.getByRole('combobox', { name: 'Resource type' })).toHaveClass('system-sm-regular') + }) + + it('should apply small and large trigger size variants', async () => { + const smallScreen = await renderSelectLikeCombobox({ + children: ( + + + + ), + }) + + await expect.element(smallScreen.getByRole('combobox', { name: 'Small resource type' })).toHaveClass('rounded-md') + await expect.element(smallScreen.getByRole('combobox', { name: 'Small resource type' })).toHaveClass('system-xs-regular') + + const largeScreen = await renderSelectLikeCombobox({ + children: ( + + + + ), + }) + + await expect.element(largeScreen.getByRole('combobox', { name: 'Large resource type' })).toHaveClass('rounded-[10px]') + await expect.element(largeScreen.getByRole('combobox', { name: 'Large resource type' })).toHaveClass('system-md-regular') + }) + + it('should render default trigger icon and support hiding it', async () => { + const withIcon = await renderSelectLikeCombobox() + + 
expect(withIcon.getByTestId('trigger').element().querySelector('.i-ri-arrow-down-s-line')).toHaveAttribute('aria-hidden', 'true') + + const withoutIcon = await renderSelectLikeCombobox({ + children: ( + + + + ), + }) + + expect(withoutIcon.getByRole('combobox', { name: 'Resource type without icon' }).element().querySelector('.i-ri-arrow-down-s-line')).not.toBeInTheDocument() + }) + }) + + describe('Input group and controls', () => { + it('should apply medium input group and input classes by default', async () => { + const screen = await renderInputCombobox() + + await expect.element(screen.getByTestId('input-group')).toHaveClass('rounded-lg') + await expect.element(screen.getByRole('combobox', { name: 'Search resources' })).toHaveClass('px-3') + await expect.element(screen.getByRole('combobox', { name: 'Search resources' })).toHaveClass('system-sm-regular') + }) + + it('should apply large input group and input classes when large size is provided', async () => { + const screen = await renderInputCombobox({ + children: ( + + + + ), + }) + + await expect.element(screen.getByTestId('input-group')).toHaveClass('rounded-[10px]') + await expect.element(screen.getByRole('combobox', { name: 'Search resources' })).toHaveClass('px-4') + await expect.element(screen.getByRole('combobox', { name: 'Search resources' })).toHaveClass('system-md-regular') + }) + + it('should set input defaults and forward passthrough props', async () => { + const screen = await renderInputCombobox({ + children: ( + + + + ), + }) + + await expect.element(screen.getByRole('combobox', { name: 'Search resources' })).toHaveAttribute('autocomplete', 'off') + await expect.element(screen.getByRole('combobox', { name: 'Search resources' })).toHaveAttribute('type', 'text') + await expect.element(screen.getByRole('combobox', { name: 'Search resources' })).toHaveAttribute('placeholder', 'Find a resource') + await expect.element(screen.getByRole('combobox', { name: 'Search resources' })).toBeRequired() + await 
expect.element(screen.getByRole('combobox', { name: 'Search resources' })).toHaveClass('custom-input') + }) + + it('should provide fallback aria labels and decorative icons for input controls', async () => { + const screen = await renderInputCombobox() + + await expect.element(screen.getByRole('button', { name: 'Clear combobox' })).toHaveAttribute('type', 'button') + await expect.element(screen.getByRole('button', { name: 'Open combobox options' })).toHaveAttribute('type', 'button') + expect(screen.getByRole('button', { name: 'Clear combobox' }).element().querySelector('.i-ri-close-line')).toHaveAttribute('aria-hidden', 'true') + expect(screen.getByRole('button', { name: 'Open combobox options' }).element().querySelector('.i-ri-arrow-down-s-line')).toHaveAttribute('aria-hidden', 'true') + }) + + it('should rely on aria-labelledby when provided instead of injecting fallback labels', async () => { + const screen = await renderInputCombobox({ + children: ( + <> + Clear from label + Trigger from label + + + + + + + ), + }) + + await expect.element(screen.getByRole('button', { name: 'Clear from label' })).not.toHaveAttribute('aria-label') + await expect.element(screen.getByRole('button', { name: 'Trigger from label' })).not.toHaveAttribute('aria-label') + }) + }) + + describe('Content and options', () => { + it('should use default overlay placement and Dify popup classes', async () => { + const screen = await renderSelectLikeCombobox({ open: true }) + + await expect.element(screen.getByRole('group', { name: 'combobox positioner' })).toHaveAttribute('data-side', 'bottom') + await expect.element(screen.getByRole('group', { name: 'combobox positioner' })).toHaveAttribute('data-align', 'start') + await expect.element(screen.getByRole('group', { name: 'combobox positioner' })).toHaveClass('z-1002') + await expect.element(screen.getByRole('dialog', { name: 'combobox popup' })).toHaveClass('rounded-xl') + await expect.element(screen.getByRole('dialog', { name: 'combobox popup' 
})).toHaveClass('w-(--anchor-width)') + await expect.element(screen.getByRole('listbox', { name: 'combobox list' })).toHaveClass('scroll-py-1') + }) + + it('should apply custom placement side and passthrough popup props', async () => { + const onPopupClick = vi.fn() + const screen = await renderWithSafeViewport( + + + + + + + + Workflow + + + + , + ) + + asHTMLElement(screen.getByRole('dialog', { name: 'combobox popup' }).element()).click() + + await expect.element(screen.getByRole('group', { name: 'combobox positioner' })).toHaveAttribute('data-side', 'top') + expect(onPopupClick).toHaveBeenCalledTimes(1) + }) + + it('should render item text indicator status and empty wrappers with design classes', async () => { + const screen = await renderSelectLikeCombobox({ open: true }) + + await expect.element(screen.getByTestId('list').getByText('Workflow')).toHaveClass('system-sm-medium') + await expect.element(screen.getByTestId('status')).toHaveClass('text-text-tertiary') + await expect.element(screen.getByTestId('empty')).toHaveClass('system-sm-regular') + expect(screen.getByTestId('list').getByText('Workflow').element().parentElement?.querySelector('.i-ri-check-line')).toHaveAttribute('aria-hidden', 'true') + }) + + it('should forward custom classes to group label separator item text and indicator', async () => { + const screen = await renderWithSafeViewport( + + + + + + + + Resources + + + Workflow + + + + + + , + ) + + await expect.element(screen.getByText('Resources')).toHaveClass('custom-label') + await expect.element(screen.getByTestId('separator')).toHaveClass('custom-separator') + await expect.element(screen.getByRole('option', { name: 'Workflow' })).toHaveClass('custom-item') + await expect.element(screen.getByTestId('custom-list').getByText('Workflow')).toHaveClass('custom-text') + await expect.element(screen.getByTestId('indicator')).toHaveClass('custom-indicator') + }) + }) + + describe('Multiple selection chips', () => { + it('should render chip wrappers 
and default remove button label', async () => { + const screen = await renderWithSafeViewport( + + + + {(selectedValue: string[]) => ( + + {selectedValue.map(item => ( + + {item} + + + ))} + + )} + + + + , + ) + + await expect.element(screen.getByTestId('chips')).toHaveClass('custom-chips') + await expect.element(screen.getByText('maya').element().parentElement!).toHaveClass('custom-chip') + await expect.element(screen.getByRole('button', { name: 'Remove selected item' })).toHaveAttribute('type', 'button') + expect(screen.getByTestId('remove-chip').element().querySelector('.i-ri-close-line')).toHaveAttribute('aria-hidden', 'true') + }) + + it('should preserve chip remove aria-labelledby over fallback label', async () => { + const screen = await renderWithSafeViewport( + + + + {(selectedValue: string[]) => ( + + {selectedValue.map(item => ( + + Remove Maya + + + ))} + + )} + + + + , + ) + + await expect.element(screen.getByRole('button', { name: 'Remove Maya' })).not.toHaveAttribute('aria-label') + }) + }) +}) diff --git a/packages/dify-ui/src/combobox/index.stories.tsx b/packages/dify-ui/src/combobox/index.stories.tsx new file mode 100644 index 0000000000..f2b5f4d4c6 --- /dev/null +++ b/packages/dify-ui/src/combobox/index.stories.tsx @@ -0,0 +1,618 @@ +import type { Meta, StoryObj } from '@storybook/react-vite' +import type { Virtualizer } from '@tanstack/react-virtual' +import type { RefObject } from 'react' +import { useVirtualizer } from '@tanstack/react-virtual' +import { useEffect, useRef, useState } from 'react' +import { + Combobox, + ComboboxChip, + ComboboxChipRemove, + ComboboxChips, + ComboboxClear, + ComboboxCollection, + ComboboxContent, + ComboboxEmpty, + ComboboxGroup, + ComboboxGroupLabel, + ComboboxInput, + ComboboxInputGroup, + ComboboxInputTrigger, + ComboboxItem, + ComboboxItemIndicator, + ComboboxItemText, + ComboboxLabel, + ComboboxList, + ComboboxSeparator, + ComboboxStatus, + ComboboxTrigger, + ComboboxValue, + useComboboxFilteredItems, +} 
from '.' +import { cn } from '../cn' + +type Option = { + value: string + label: string + meta?: string + icon?: string + disabled?: boolean +} + +type OptionGroup = { + label: string + items: Option[] +} + +const fieldWidth = 'w-80' +const wideFieldWidth = 'w-[520px]' +const nativeFieldLabelClassName = 'mb-1 block text-text-secondary system-sm-medium' + +type StoryVirtualizer = Virtualizer + +const scrollHighlightedVirtualItem = ( + item: unknown, + { + reason, + index, + }: { + reason: 'keyboard' | 'pointer' | 'none' + index: number + }, + virtualizer: StoryVirtualizer | null, +) => { + if (!item || !virtualizer) + return + + const isStart = index === 0 + const isEnd = index === virtualizer.options.count - 1 + const shouldScroll = reason === 'none' || (reason === 'keyboard' && (isStart || isEnd)) + + if (shouldScroll) { + queueMicrotask(() => { + virtualizer.scrollToIndex(index, { align: isEnd ? 'start' : 'end' }) + }) + } +} + +const providerOptions: Option[] = [ + { value: 'openai', label: 'OpenAI', meta: 'GPT-5, GPT-4.1', icon: 'i-ri-openai-fill' }, + { value: 'anthropic', label: 'Anthropic', meta: 'Claude Opus, Sonnet', icon: 'i-ri-sparkling-2-line' }, + { value: 'google', label: 'Google', meta: 'Gemini 2.5', icon: 'i-ri-google-fill' }, + { value: 'azure-openai', label: 'Azure OpenAI', meta: 'Enterprise workspace', icon: 'i-ri-microsoft-fill' }, + { value: 'localai', label: 'LocalAI', meta: 'Self-hosted endpoint', icon: 'i-ri-server-line', disabled: true }, +] + +const dataSourceOptions: Option[] = [ + { value: 'knowledge-base', label: 'Knowledge Base', meta: 'Vector index', icon: 'i-ri-database-2-line' }, + { value: 'notion', label: 'Notion', meta: 'Synced pages', icon: 'i-ri-notion-fill' }, + { value: 'website', label: 'Website crawler', meta: 'Public URLs', icon: 'i-ri-global-line' }, + { value: 's3', label: 'S3 bucket', meta: 'Private files', icon: 'i-ri-cloud-line' }, + { value: 'slack', label: 'Slack', meta: 'Channel history', icon: 'i-ri-slack-fill' }, 
+] + +const reviewerOptions: Option[] = [ + { value: 'maya', label: 'Maya Chen', meta: 'Product owner' }, + { value: 'liam', label: 'Liam Brooks', meta: 'Prompt engineer' }, + { value: 'nora', label: 'Nora Park', meta: 'Data steward' }, + { value: 'owen', label: 'Owen Reed', meta: 'Security reviewer' }, + { value: 'yuki', label: 'Yuki Tanaka', meta: 'ML engineer' }, +] + +const toolGroups: OptionGroup[] = [ + { + label: 'Retrieval', + items: [ + { value: 'dataset-search', label: 'Dataset search', meta: 'Search workspace knowledge', icon: 'i-ri-search-eye-line' }, + { value: 'web-scraper', label: 'Web scraper', meta: 'Fetch public pages', icon: 'i-ri-global-line' }, + ], + }, + { + label: 'Actions', + items: [ + { value: 'http-request', label: 'HTTP request', meta: 'Call external APIs', icon: 'i-ri-terminal-box-line' }, + { value: 'code-runner', label: 'Code runner', meta: 'Execute sandboxed scripts', icon: 'i-ri-code-s-slash-line' }, + ], + }, + { + label: 'Operations', + items: [ + { value: 'human-review', label: 'Human review', meta: 'Assign approval task', icon: 'i-ri-user-voice-line' }, + { value: 'audit-log', label: 'Audit log', meta: 'Record workflow events', icon: 'i-ri-file-list-3-line' }, + ], + }, +] + +const tagOptions: Option[] = [ + { value: 'rag', label: 'RAG' }, + { value: 'agent', label: 'Agent' }, + { value: 'production', label: 'Production' }, + { value: 'evaluation', label: 'Evaluation' }, + { value: 'finance', label: 'Finance' }, + { value: 'support', label: 'Support' }, +] + +const directoryOptions: Option[] = [ + { value: 'maya-chen', label: 'Maya Chen', meta: 'Product owner · maya@example.com', icon: 'i-ri-user-3-line' }, + { value: 'liam-brooks', label: 'Liam Brooks', meta: 'Prompt engineer · liam@example.com', icon: 'i-ri-user-3-line' }, + { value: 'nora-park', label: 'Nora Park', meta: 'Data steward · nora@example.com', icon: 'i-ri-user-3-line' }, + { value: 'owen-reed', label: 'Owen Reed', meta: 'Security reviewer · owen@example.com', 
icon: 'i-ri-shield-user-line' }, + { value: 'yuki-tanaka', label: 'Yuki Tanaka', meta: 'ML engineer · yuki@example.com', icon: 'i-ri-user-3-line' }, + { value: 'ava-martin', label: 'Ava Martin', meta: 'Support lead · ava@example.com', icon: 'i-ri-customer-service-2-line' }, +] + +const emptyOptions: Option[] = [ + { value: 'billing', label: 'Billing connector' }, + { value: 'zendesk', label: 'Zendesk' }, + { value: 'github', label: 'GitHub issues' }, +] + +const modelCatalogOptions: Option[] = Array.from({ length: 1000 }, (_, index) => { + const provider = ['OpenAI', 'Anthropic', 'Google', 'Mistral', 'DeepSeek'][index % 5]! + const family = ['chat', 'reasoning', 'vision', 'embedding'][index % 4]! + const number = new Intl.NumberFormat('en-US', { + minimumIntegerDigits: 4, + }).format(index + 1) + + return { + value: `model-${index + 1}`, + label: `${provider} ${family} ${number}`, + meta: `${provider} provider · ${family}`, + icon: family === 'embedding' + ? 'i-ri-vector-triangle' + : family === 'vision' + ? 'i-ri-image-circle-line' + : family === 'reasoning' + ? 'i-ri-brain-line' + : 'i-ri-chat-1-line', + } +}) + +const sizeOptions: Option[] = providerOptions.slice(0, 3) +const defaultProvider = providerOptions[0]! +const disabledProvider = providerOptions[1]! +const defaultDataSource = dataSourceOptions[0]! +const defaultPopupDataSource = dataSourceOptions[1]! +const readOnlyDataSource = dataSourceOptions[2]! +const defaultTool = toolGroups[0]!.items[0]! +const defaultReviewers = [reviewerOptions[0]!, reviewerOptions[2]!] +const defaultTag = tagOptions[2]! 
+ +const renderOptionItem = (option: Option, index?: number) => ( + + + {option.icon && } + + {option.label} + {option.meta && {option.meta}} + + + + +) + +const renderSimpleOptionItem = (option: Option, index?: number) => ( + + {option.label} + + +) + +const PopupSearchInput = ({ + label, + placeholder, +}: { + label: string + placeholder: string +}) => ( + + + + + +) + +const GroupedToolList = () => { + const groups = useComboboxFilteredItems() + + return ( + + {groups.map((group, groupIndex) => ( + + {groupIndex > 0 && } + {group.label} + + {(option: Option) => renderOptionItem(option)} + + + ))} + + ) +} + +const VirtualizedModelList = ({ + virtualizerRef, +}: { + virtualizerRef: RefObject +}) => { + const scrollRef = useRef(null) + const filteredItems = useComboboxFilteredItems